diff --git a/.gitignore b/.gitignore index 80fd850cd4483791afc19fc19732f5d0eadd8e2e..c25f60075cc9ce8f681cdefae308411cdb73657d 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,10 @@ pysim/ tests/script/api/batchprepare taosadapter taosadapter-debug +tools/taos-tools/* +tools/taosws-rs/* +tools/taosadapter/* +tools/upx* # Doxygen Generated files html/ diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 7d5515f8c59b2cd9cd5099a9b55b6ca88d6ce37b..0000000000000000000000000000000000000000 --- a/.gitmodules +++ /dev/null @@ -1,12 +0,0 @@ -[submodule "src/connector/go"] - path = src/connector/go - url = git@github.com:taosdata/driver-go.git -[submodule "src/connector/hivemq-tdengine-extension"] - path = src/connector/hivemq-tdengine-extension - url = git@github.com:taosdata/hivemq-tdengine-extension.git -[submodule "deps/TSZ"] - path = deps/TSZ - url = https://github.com/taosdata/TSZ.git -[submodule "examples/rust"] - path = examples/rust - url = https://github.com/songtianyi/tdengine-rust-bindings.git diff --git a/Jenkinsfile2 b/Jenkinsfile2 index e9e7538872d5b03f73423f7353fafc9e0fd5bb39..4b47d56a6c86840c35dcdbe79ffbe6a45fe1323f 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -118,6 +118,7 @@ def pre_test(){ git rm --cached tools/taos-tools 2>/dev/null || : git rm --cached tools/taosadapter 2>/dev/null || : git rm --cached tools/taosws-rs 2>/dev/null || : + git rm --cached examples/rust 2>/dev/null || : ''' sh ''' cd ${WKC} @@ -269,6 +270,7 @@ def pre_test_win(){ git rm --cached tools/taos-tools 2>nul git rm --cached tools/taosadapter 2>nul git rm --cached tools/taosws-rs 2>nul + git rm --cached examples/rust 2>nul exit 0 ''' bat ''' diff --git a/cmake/cmake.options b/cmake/cmake.options index d2dd48c1a0f00fcbb342b06a61b77f9018c59d7b..bec64f7bf00cdb0c6fddc713af0801eae08d45ea 100644 --- a/cmake/cmake.options +++ b/cmake/cmake.options @@ -90,6 +90,12 @@ ELSE () ENDIF () ENDIF () +option( + RUST_BINDINGS + "If build with rust-bindings" + ON + ) + option( JEMALLOC_ENABLED "If build with jemalloc" @@ -157,18 +163,6 @@ option( ON ) -option( - BUILD_WITH_CRAFT - "If build with canonical-raft" - OFF -) - -option( - BUILD_WITH_TRAFT - "If build with traft" - OFF -) - IF(${TD_LINUX} MATCHES TRUE) option( diff --git a/cmake/craft_CMakeLists.txt.in b/cmake/craft_CMakeLists.txt.in deleted file mode 100644 index f0ec7feaf204cf73c6e51e5a429fa8d64d6ec01a..0000000000000000000000000000000000000000 --- a/cmake/craft_CMakeLists.txt.in +++ /dev/null @@ -1,14 +0,0 @@ - -# canonical-raft -ExternalProject_Add(craft - GIT_REPOSITORY https://github.com/canonical/raft.git - GIT_TAG v0.11.2 - SOURCE_DIR "${TD_CONTRIB_DIR}/craft" - BINARY_DIR "${TD_CONTRIB_DIR}/craft" - #BUILD_IN_SOURCE TRUE - # https://answers.ros.org/question/333125/how-to-include-external-automakeautoconf-projects-into-ament_cmake/ - CONFIGURE_COMMAND COMMAND autoreconf -i COMMAND ./configure --enable-example - BUILD_COMMAND "$(MAKE)" - INSTALL_COMMAND "" - TEST_COMMAND "" -) diff --git a/cmake/rust-bindings_CMakeLists.txt.in b/cmake/rust-bindings_CMakeLists.txt.in new file mode 100644 index 0000000000000000000000000000000000000000..d16e86139b20fa94505953bc56108f1f61dbbffb --- /dev/null +++ b/cmake/rust-bindings_CMakeLists.txt.in @@ -0,0 +1,12 @@ + +# rust-bindings +ExternalProject_Add(rust-bindings + GIT_REPOSITORY https://github.com/songtianyi/tdengine-rust-bindings.git + GIT_TAG 7ed7a97 + SOURCE_DIR "${TD_SOURCE_DIR}/examples/rust" + BINARY_DIR "${TD_SOURCE_DIR}/examples/rust" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + 
INSTALL_COMMAND "" + TEST_COMMAND "" + ) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index af3b5af4a6dea615f73ab29863b37cb14b9f4caf..68af4e7fcb854295259aa5d7a83873ba9f1dac0e 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -1,5 +1,5 @@ -# zlib +# taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git GIT_TAG df8678f diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 8c3a4d23569fd963008d285efd11a834082c3160..8d8aa0b741605e9eaab16feee5df677802a732b0 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -1,8 +1,8 @@ -# zlib +# taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG c529299 + GIT_TAG f4e456a SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in index 4b7c2644728fa8de0f989eafca2f914584b9b7aa..3c1a7f5e73f23bfcbca7196450bcddfb1ea71d32 100644 --- a/cmake/taosws_CMakeLists.txt.in +++ b/cmake/taosws_CMakeLists.txt.in @@ -1,5 +1,5 @@ -# zlib +# taosws-rs ExternalProject_Add(taosws-rs GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git GIT_TAG 9de599d diff --git a/cmake/traft_CMakeLists.txt.in b/cmake/traft_CMakeLists.txt.in deleted file mode 100644 index 0934e2c35c9be30f8f3724f731bddde1ec0c7421..0000000000000000000000000000000000000000 --- a/cmake/traft_CMakeLists.txt.in +++ /dev/null @@ -1,14 +0,0 @@ - -# traft -ExternalProject_Add(traft - GIT_REPOSITORY https://github.com/taosdata/traft.git - GIT_TAG for_3.0 - SOURCE_DIR "${TD_CONTRIB_DIR}/traft" - BINARY_DIR "${TD_CONTRIB_DIR}/traft" - #BUILD_IN_SOURCE TRUE - # https://answers.ros.org/question/333125/how-to-include-external-automakeautoconf-projects-into-ament_cmake/ - CONFIGURE_COMMAND COMMAND autoreconf -i COMMAND ./configure - BUILD_COMMAND "$(MAKE)" - INSTALL_COMMAND "" - TEST_COMMAND "" -) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 8488dfb981a2c19ba0c35359b1fa728e3b6a114f..b4e8825431475c09fbf925671e6d7f691c700b15 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -105,6 +105,11 @@ if(${BUILD_WITH_SQLITE}) cat("${TD_SUPPORT_DIR}/sqlite_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif(${BUILD_WITH_SQLITE}) +# rust-bindings +if(${RUST_BINDINGS}) + cat("${TD_SUPPORT_DIR}/rust-bindings_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif(${RUST_BINDINGS}) + # lucene if(${BUILD_WITH_LUCENE}) cat("${TD_SUPPORT_DIR}/lucene_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -135,6 +140,24 @@ execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") execute_process(COMMAND "${CMAKE_COMMAND}" --build . 
WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download") + +# clear submodule +execute_process(COMMAND git submodule deinit -f tools/taos-tools + WORKING_DIRECTORY "${TD_SOURCE_DIR}") +execute_process(COMMAND git rm --cached tools/taos-tools + WORKING_DIRECTORY "${TD_SOURCE_DIR}") +execute_process(COMMAND git submodule deinit -f tools/taosadapter + WORKING_DIRECTORY "${TD_SOURCE_DIR}") +execute_process(COMMAND git rm --cached tools/taosadapter + WORKING_DIRECTORY "${TD_SOURCE_DIR}") +execute_process(COMMAND git submodule deinit -f tools/taosws-rs + WORKING_DIRECTORY "${TD_SOURCE_DIR}") +execute_process(COMMAND git rm --cached tools/taosws-rs + WORKING_DIRECTORY "${TD_SOURCE_DIR}") +execute_process(COMMAND git submodule deinit -f examples/rust + WORKING_DIRECTORY "${TD_SOURCE_DIR}") +execute_process(COMMAND git rm --cached examples/rust + WORKING_DIRECTORY "${TD_SOURCE_DIR}") # ================================================================================================ # Build diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index c5cbf5a70b7e49324ebb1e08cb31eac9098a74d6..6375422b07a2ee7d5c9b6a0074060a39888da773 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -273,7 +273,7 @@ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHER **Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. If input strings contain NULL value, the result is NULL. -**Applicable data types**: VARCHAR, NCHAR. At least 2 input strings are requird, and at most 8 input strings are allowed. +**Applicable data types**: VARCHAR, NCHAR. At least 2 input strings are required, and at most 8 input strings are allowed. **Applicable table types**: table, STable. @@ -290,7 +290,7 @@ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | st **Return value type**: If all input strings are VARCHAR type, the result is VARCHAR type too. If any one of input strings is NCHAR type, then the result is NCHAR. If input strings contain NULL value, the result is NULL. -**Applicable data types**: VARCHAR, NCHAR. At least 3 input strings are requird, and at most 9 input strings are allowed. +**Applicable data types**: VARCHAR, NCHAR. At least 3 input strings are required, and at most 9 input strings are allowed. **Applicable table types**: table, STable. @@ -527,6 +527,7 @@ SELECT TIMEDIFF(ts1 | datetime_string1, ts2 | datetime_string2 [, time_unit]) FR - Time unit specified by `time_unit` can be: 1b(nanosecond), 1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). - If `time_unit` parameter is not specified, the precision of the returned time duration is same as the precision set for the current database in use. +- If input date-time string cannot be converted to UNIX timestamp, NULL value is returned. #### TIMETRUNCATE @@ -547,6 +548,7 @@ SELECT TIMETRUNCATE(ts_val | datetime_string | ts_col, time_unit) FROM { tb_name - Time unit specified by `time_unit` can be: 1b(nanosecond),1u(microsecond),1a(millisecond),1s(second),1m(minute),1h(hour),1d(day),1w(week). - The precision of the returned timestamp is same as the precision set for the current database in use. +- If input date-time string cannot be converted to UNIX timestamp, NULL value is returned. #### TIMEZONE @@ -599,7 +601,7 @@ FROM { tb_name | stb_name } [WHERE clause] **Return value type**: DOUBLE. 
-**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -654,7 +656,7 @@ SELECT ELAPSED(field_name[, time_unit]) FROM { tb_name | stb_name } [WHERE claus **Return value type**:DOUBLE. -**Applicable Column type**:TIMESTAMP. +**Applicable data type**:TIMESTAMP. **Applicable tables**: table, STable, outter in nested query. @@ -693,7 +695,7 @@ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -709,7 +711,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause]; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -723,7 +725,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause]; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -737,7 +739,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: INTEGER. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **More explanations**: The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge. However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case. @@ -751,14 +753,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam **Return value type**:DOUBLE or BIGINT, depends on normalized parameter settings. -**Applicable column type**:Numerical types. +**Applicable data type**:Numerical types. **Applicable table types**: table, STable. **Explanations**: -1. bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 -2. bin_description: parameter to describe the rule to generate buckets,can be in the following JSON formats for each bin_type respectively: +- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。 +- bin_description: parameter to describe the rule to generate buckets,can be in the following JSON formats for each bin_type respectively: - "user_input": "[1, 3, 5, 7]": User specified bin values. @@ -776,7 +778,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins. The above "log_bin" descriptor generates a set of bins:[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]. -3. normalized: setting to 1/0 to turn on/off result normalization. +- normalized: setting to 1/0 to turn on/off result normalization. ### PERCENTILE @@ -788,7 +790,7 @@ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause]; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table. @@ -808,7 +810,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. 
@@ -828,7 +830,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **Applicable table types**: table, STable. @@ -848,7 +850,7 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] [ **Return value type**: Same as the column being operated upon. -**Applicable column types**: Numeric data types. +**Applicable data types**: Numeric data types. **Applicable table types**: table, STable, nested query. @@ -872,7 +874,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **Applicable table types**: table, STable. @@ -892,7 +894,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name }; **Return value type**: Same as the column being operated upon. -**Applicable column types**: All data type. +**Applicable data types**: All data type. **Applicable table types**: table, STable. @@ -911,7 +913,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the data type of the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -925,7 +927,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Return value type**: Same as the data type of the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -939,7 +941,7 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause]; **Return value type**:Same as the data type of the column being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **More explanations**:Considering the number of returned result set is unpredictable, it's suggested to limit the number of unique values to 100,000, otherwise error will be returned. @@ -976,7 +978,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. ### TOP @@ -988,7 +990,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause]; **Return value type**: Same as the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -1008,7 +1010,7 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause]; **Return value type**: Same as the column or tag being operated upon. -**Applicable column types**: All data types. +**Applicable data types**: All data types. **More explanations**: @@ -1050,7 +1052,7 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -1070,7 +1072,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **Return value type**: Same as the column being operated upon. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. 
**Applicable table types**: table, STable. @@ -1090,7 +1092,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. @@ -1183,7 +1185,7 @@ SELECT TWA(field_name) FROM tb_name WHERE clause; **Return value type**: DOUBLE. -**Applicable column types**: Numeric types. +**Applicable data types**: Numeric types. **Applicable table types**: table, STable. diff --git a/docs/examples/go/connect/afconn/main.go b/docs/examples/go/connect/afconn/main.go index 2d36ef03ab049a313ed0416f552397ff252e022e..3c16212a453eef9482159f5f32c406e07f657ef5 100644 --- a/docs/examples/go/connect/afconn/main.go +++ b/docs/examples/go/connect/afconn/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" ) func main() { diff --git a/docs/examples/go/connect/cgoexample/main.go b/docs/examples/go/connect/cgoexample/main.go index ba7ed0f728a1cd546dbc3199ce4c0dc854ebee91..57b8d5d91f0d8f5e5b3d5890906f388f96fd1126 100644 --- a/docs/examples/go/connect/cgoexample/main.go +++ b/docs/examples/go/connect/cgoexample/main.go @@ -4,7 +4,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) func main() { diff --git a/docs/examples/go/connect/restexample/main.go b/docs/examples/go/connect/restexample/main.go index 1efc98b988c183c4c680884057bf2a72a9dd19e9..f10151f04fc0b4b213a235be1bef17a8fab49e4c 100644 --- a/docs/examples/go/connect/restexample/main.go +++ b/docs/examples/go/connect/restexample/main.go @@ -4,7 +4,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { diff --git a/docs/examples/go/connect/wrapper/main.go b/docs/examples/go/connect/wrapper/main.go index d7e71a8baf229012b6944aad0eaac232924dd667..a459539db34ae7e37f619712cd8e142580116d8f 100644 --- a/docs/examples/go/connect/wrapper/main.go +++ b/docs/examples/go/connect/wrapper/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/wrapper" + "github.com/taosdata/driver-go/v3/wrapper" ) func main() { diff --git a/docs/examples/go/go.mod b/docs/examples/go/go.mod index 5945e395e93b373d47fe71f3584c37fed9526638..2bc1a74cb6ef14221fa384701773dc73fe3b161d 100644 --- a/docs/examples/go/go.mod +++ b/docs/examples/go/go.mod @@ -2,5 +2,5 @@ module goexample go 1.17 -require github.com/taosdata/driver-go/v2 develop +require github.com/taosdata/driver-go/v3 3.0 diff --git a/docs/examples/go/insert/json/main.go b/docs/examples/go/insert/json/main.go index 6be375270e32a5091c015f88de52c9dda2246b59..805c123342ef6564239d5e7122bb9a7960f5c95e 100644 --- a/docs/examples/go/insert/json/main.go +++ b/docs/examples/go/insert/json/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" ) func prepareDatabase(conn *af.Connector) { diff --git a/docs/examples/go/insert/line/main.go b/docs/examples/go/insert/line/main.go index c17e1a5270850e6a8b497e0dbec4ae714ee1e2d6..1a09edc40cc6e600190c781d7df2c1c6f4fad5cf 100644 --- a/docs/examples/go/insert/line/main.go +++ b/docs/examples/go/insert/line/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" ) func prepareDatabase(conn *af.Connector) { diff --git 
a/docs/examples/go/insert/sql/main.go b/docs/examples/go/insert/sql/main.go index 6cd5f860e65f4fffd139668f69cc1772f5310eae..a493ef5fb0a6418b30aa55911a713cf64ce75def 100644 --- a/docs/examples/go/insert/sql/main.go +++ b/docs/examples/go/insert/sql/main.go @@ -4,7 +4,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func createStable(taos *sql.DB) { diff --git a/docs/examples/go/insert/stmt/main.go b/docs/examples/go/insert/stmt/main.go index 7093fdf1e52bc5a14fc92cec995fd81e70717d9f..d23a6e345c6c14f6cfeb8a21a81be296fb19f70e 100644 --- a/docs/examples/go/insert/stmt/main.go +++ b/docs/examples/go/insert/stmt/main.go @@ -4,9 +4,9 @@ import ( "fmt" "time" - "github.com/taosdata/driver-go/v2/af" - "github.com/taosdata/driver-go/v2/af/param" - "github.com/taosdata/driver-go/v2/common" + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/param" + "github.com/taosdata/driver-go/v3/common" ) func checkErr(err error, prompt string) { diff --git a/docs/examples/go/insert/telnet/main.go b/docs/examples/go/insert/telnet/main.go index 91fafbe71adbf60d9341b903f5a25708b7011852..4cf4375db5be6c4cca53bbcce877bf598ac326e7 100644 --- a/docs/examples/go/insert/telnet/main.go +++ b/docs/examples/go/insert/telnet/main.go @@ -3,7 +3,7 @@ package main import ( "fmt" - "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" ) func prepareDatabase(conn *af.Connector) { diff --git a/docs/examples/go/query/sync/main.go b/docs/examples/go/query/sync/main.go index 0326c7a7da5da212f66b1421bb46e5972bef9609..add422e3859bee88febef83ad5666550e67adf5b 100644 --- a/docs/examples/go/query/sync/main.go +++ b/docs/examples/go/query/sync/main.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { diff --git a/docs/examples/go/sub/main.go b/docs/examples/go/sub/main.go index c44521b34026ebf6f84f70022a7c2d1f83e9b781..7721eed1346f254e104bb41a2135171ebd518e2c 100644 --- a/docs/examples/go/sub/main.go +++ b/docs/examples/go/sub/main.go @@ -1,53 +1,120 @@ package main import ( - "database/sql/driver" + "context" + "encoding/json" "fmt" - "io" - "os" + "strconv" "time" - taos "github.com/taosdata/driver-go/v2/af" + "github.com/taosdata/driver-go/v3/af" + "github.com/taosdata/driver-go/v3/af/tmq" + "github.com/taosdata/driver-go/v3/common" + "github.com/taosdata/driver-go/v3/errors" + "github.com/taosdata/driver-go/v3/wrapper" ) func main() { - db, err := taos.Open("", "", "", "log", 0) + db, err := af.Open("", "root", "taosdata", "", 0) if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) + panic(err) } defer db.Close() - topic, err := db.Subscribe(false, "taoslogtail", "select ts, level, ipaddr, content from log", time.Second) + _, err = db.Exec("create database if not exists example_tmq") if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(2) + panic(err) + } + _, err = db.Exec("create topic if not exists example_tmq_topic with meta as DATABASE example_tmq") + if err != nil { + panic(err) + } + config := tmq.NewConfig() + defer config.Destroy() + err = config.SetGroupID("test") + if err != nil { + panic(err) + } + err = config.SetAutoOffsetReset("earliest") + if err != nil { + panic(err) + } + err = config.SetConnectIP("127.0.0.1") + if err != nil { + panic(err) + } + err = config.SetConnectUser("root") + if err != nil { + panic(err) + } + err = config.SetConnectPass("taosdata") + if err 
!= nil { + panic(err) + } + err = config.SetConnectPort("6030") + if err != nil { + panic(err) + } + err = config.SetMsgWithTableName(true) + if err != nil { + panic(err) + } + err = config.EnableHeartBeat() + if err != nil { + panic(err) + } + err = config.EnableAutoCommit(func(result *wrapper.TMQCommitCallbackResult) { + if result.ErrCode != 0 { + errStr := wrapper.TMQErr2Str(result.ErrCode) + err := errors.NewError(int(result.ErrCode), errStr) + panic(err) + } + }) + if err != nil { + panic(err) + } + consumer, err := tmq.NewConsumer(config) + if err != nil { + panic(err) + } + err = consumer.Subscribe([]string{"example_tmq_topic"}) + if err != nil { + panic(err) + } + _, err = db.Exec("create table example_tmq.t1 (ts timestamp,v int)") + if err != nil { + panic(err) } - defer topic.Unsubscribe(true) for { - func() { - rows, err := topic.Consume() - defer func() { rows.Close(); time.Sleep(time.Second) }() - if err != nil { - fmt.Println(err) - os.Exit(3) - } - for { - values := make([]driver.Value, 4) - err := rows.Next(values) - if err == io.EOF { - break - } else if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(4) - } - ts := values[0].(time.Time) - level := values[1].(int8) - ipaddr := values[2].(string) - content := values[3].(string) - fmt.Printf("%s %d %s %s\n", ts.Format(time.StampMilli), level, ipaddr, content) - } - }() + result, err := consumer.Poll(time.Second) + if err != nil { + panic(err) + } + if result.Type != common.TMQ_RES_TABLE_META { + panic("want message type 2 got " + strconv.Itoa(int(result.Type))) + } + data, _ := json.Marshal(result.Meta) + fmt.Println(string(data)) + consumer.Commit(context.Background(), result.Message) + consumer.FreeMessage(result.Message) + break + } + _, err = db.Exec("insert into example_tmq.t1 values(now,1)") + if err != nil { + panic(err) } + for { + result, err := consumer.Poll(time.Second) + if err != nil { + panic(err) + } + if result.Type != common.TMQ_RES_DATA { + panic("want message type 1 got " + strconv.Itoa(int(result.Type))) + } + data, _ := json.Marshal(result.Data) + fmt.Println(string(data)) + consumer.Commit(context.Background(), result.Message) + consumer.FreeMessage(result.Message) + break + } + consumer.Close() } - -// 未完成 diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md index d2e6706892f3997af115e71d1da455ebce2ecbec..0db1df0de99d8e33c246091c950ab66334e059e4 100644 --- a/docs/zh/01-index.md +++ b/docs/zh/01-index.md @@ -4,11 +4,11 @@ sidebar_label: 文档首页 slug: / --- -TDengine 是一款[高性能](https://www.taosdata.com/fast)、[分布式](https://www.taosdata.com/scalable)、[支持 SQL](https://www.taosdata.com/sql-support) 的时序数据库 (Database)。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。 +TDengine是一款开源、[高性能](https://www.taosdata.com/fast)、云原生的专为物联网、工业互联网、金融等优化设计的时序数据库(Time-Series Database)。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。 TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。 -如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、连续查询、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。 +如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。 我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 
TDengine 集群,请参考[集群管理](./cluster)一章。 diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index 7e4f3420b8afd98aea3a0906544df191797a527f..bbf6b52eb985bef2decebcacd27d777bd1999b1f 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -401,7 +401,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] **返回结果类型**:CAST 中指定的类型(type_name)。 -**适用数据类型**:输入参数 expression 的类型可以是BLOB、MEDIUMBLOB和JSON外的所有类型。 +**适用数据类型**:输入参数 expression 的类型可以是除JSON外的所有类型。 **嵌套子查询支持**:适用于内层查询和外层查询。 @@ -410,10 +410,10 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] **使用说明**: - 对于不能支持的类型转换会直接报错。 -- 对于类型支持但某些值无法正确转换的情况对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: +- 对于类型支持但某些值无法正确转换的情况,对应的转换后的值以转换函数输出为准。目前可能遇到的几种情况: 1)字符串类型转换数值类型时可能出现的无效字符情况,例如"a"可能转为0,但不会报错。 2)转换到数值类型时,数值大于type_name可表示的范围时,则会溢出,但不会报错。 - 3)转换到字符串类型时,如果转换后长度超过type_name的长度,则会截断,但不会报错。 + 3)转换到字符串类型时,如果转换后长度超过type_name中指定的长度,则会截断,但不会报错。 #### TO_ISO8601 @@ -421,7 +421,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause] SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; ``` -**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果附带当前客户端的系统时区信息。 +**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。 **返回结果数据类型**:VARCHAR 类型。 @@ -435,7 +435,7 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause]; - timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。 - 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定; -- 如果输入是 TIMSTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 +- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。 #### TO_JSON @@ -516,7 +516,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM **功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。 -**返回结果数据类型**:BIGINT。输入包含不符合时间日期格式字符串则返回 NULL。 +**返回结果数据类型**:BIGINT。 **应用字段**:表示 UNIX 时间戳的 BIGINT, TIMESTAMP 类型,或符合日期时间格式的 VARCHAR, NCHAR 类型。 @@ -528,6 +528,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM - 支持的时间单位 time_unit 如下: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 - 如果时间单位 time_unit 未指定, 返回的时间差值精度与当前 DATABASE 设置的时间精度一致。 +- 输入包含不符合时间日期格式的字符串则返回 NULL。 #### TIMETRUNCATE @@ -548,6 +549,7 @@ SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name - 支持的时间单位 time_unit 如下: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。 - 返回的时间戳精度与当前 DATABASE 设置的时间精度一致。 +- 输入包含不符合时间日期格式的字符串则返回 NULL。 #### TIMEZONE @@ -659,8 +661,6 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE **适用数据类型**:TIMESTAMP。 -**支持的版本**:2.6.0.0 及以后的版本。 - **适用于**: 表,超级表,嵌套查询的外层查询 **说明**: @@ -767,19 +767,19 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam **适用于**: 表和超级表。 **详细说明**: -1. bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 -2. 
bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): +- bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。 +- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串): - "user_input": "[1, 3, 5, 7]" 用户指定 bin 的具体数值。 - "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}" - "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点, + "start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。 - "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}" - "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点跟终点, + "start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点, 生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。 -3. normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 +- normalized 是否将返回结果归一化到 0~1 之间 。有效输入为 0 和 1。 ### PERCENTILE @@ -958,20 +958,20 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause]; SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` - **功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 +**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - **返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。 +**返回结果类型**: 同原始数据类型, 返回结果中带有该行记录的时间戳。 - **适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。 +**适用数据类型**: 在超级表查询中使用时,不能应用在标签之上。 - **嵌套子查询支持**: 适用于内层查询和外层查询。 +**嵌套子查询支持**: 适用于内层查询和外层查询。 - **适用于**:表和超级表。 +**适用于**:表和超级表。 - **使用说明**: - - - 不能参与表达式计算;该函数可以应用在普通表和超级表上; - - 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。 +**使用说明**: + +- 不能参与表达式计算;该函数可以应用在普通表和超级表上; +- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。 ### TAIL @@ -1048,9 +1048,9 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause] **使用说明**: - - 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 - - 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 - - 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 +- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。 +- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。 +- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 ### DERIVATIVE @@ -1069,8 +1069,8 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER **使用说明**: - - DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。 - - 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 +- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。 +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。 ### DIFF @@ -1088,8 +1088,8 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER **使用说明**: - - 输出结果行数是范围内总行数减一,第一行没有结果输出。 - - 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 +- 输出结果行数是范围内总行数减一,第一行没有结果输出。 +- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。 ### IRATE @@ -1113,21 +1113,21 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause; SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause] ``` - **功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 +**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。 - **返回结果类型**: DOUBLE。 +**返回结果类型**: DOUBLE。 - **适用数据类型**: 数值类型。 +**适用数据类型**: 数值类型。 - **嵌套子查询支持**: 适用于内层查询和外层查询。 +**嵌套子查询支持**: 适用于内层查询和外层查询。 - **适用于**:表和超级表。 +**适用于**:表和超级表。 - **使用说明**: +**使用说明**: - - 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); - - 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; - - 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 +- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1); +- 
只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用; +- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。 ### STATECOUNT diff --git a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx index a8a92606e4aadf7298359023e739d568788094fd..ba43aa30fd3593b9bab4a1f76de1913087e419fc 100644 --- a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx @@ -5,7 +5,7 @@ title: REST API 为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见[视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。 :::note -与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。从 2.2.0.0 版本开始,支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。从 2.4.0.0 版本开始,RESTful 默认由 taosAdapter 提供,要求必须在 URL 中指定 db_name。 +与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。 ::: ## 安装 @@ -28,54 +28,204 @@ curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.t ```json { - "status": "succ", - "head": [ - "name", - "created_time", - "ntables", - "vgroups", - "replica", - "quorum", - "days", - "keep1,keep2,keep(D)", - "cache(MB)", - "blocks", - "minrows", - "maxrows", - "wallevel", - "fsync", - "comp", - "precision", - "status" - ], - "data": [ - [ - "log", - "2020-09-02 17:23:00.039", - 4, - 1, - 1, - 1, - 10, - "30,30,30", - 1, - 3, - 100, - 4096, - 1, - 3000, - 2, - "us", - "ready" - ] - ], - "rows": 1 + "code": 0, + "column_meta": [ + [ + "name", + "VARCHAR", + 64 + ], + [ + "create_time", + "TIMESTAMP", + 8 + ], + [ + "vgroups", + "SMALLINT", + 2 + ], + [ + "ntables", + "BIGINT", + 8 + ], + [ + "replica", + "TINYINT", + 1 + ], + [ + "strict", + "VARCHAR", + 4 + ], + [ + "duration", + "VARCHAR", + 10 + ], + [ + "keep", + "VARCHAR", + 32 + ], + [ + "buffer", + "INT", + 4 + ], + [ + "pagesize", + "INT", + 4 + ], + [ + "pages", + "INT", + 4 + ], + [ + "minrows", + "INT", + 4 + ], + [ + "maxrows", + "INT", + 4 + ], + [ + "comp", + "TINYINT", + 1 + ], + [ + "precision", + "VARCHAR", + 2 + ], + [ + "status", + "VARCHAR", + 10 + ], + [ + "retention", + "VARCHAR", + 60 + ], + [ + "single_stable", + "BOOL", + 1 + ], + [ + "cachemodel", + "VARCHAR", + 11 + ], + [ + "cachesize", + "INT", + 4 + ], + [ + "wal_level", + "TINYINT", + 1 + ], + [ + "wal_fsync_period", + "INT", + 4 + ], + [ + "wal_retention_period", + "INT", + 4 + ], + [ + "wal_retention_size", + "BIGINT", + 8 + ], + [ + "wal_roll_period", + "INT", + 4 + ], + [ + "wal_seg_size", + "BIGINT", + 8 + ] + ], + "data": [ + [ + "information_schema", + null, + null, + 14, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + "ready", + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + "performance_schema", + null, + null, + 3, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + "ready", + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ] + ], + "rows": 2 } ``` ## HTTP 请求格式 -``` +```text http://:/rest/sql/[db_name] ``` @@ -83,21 +233,21 @@ http://:/rest/sql/[db_name] - fqnd: 集群中的任一台主机 FQDN 或 IP 地址 - port: 配置文件中 httpPort 配置项,缺省为 6041 -- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。(从 2.2.0.0 版本开始支持) +- db_name: 可选参数,指定本次所执行的 SQL 语句的默认数据库库名。 
例如:`http://h1.taos.com:6041/rest/sql/test` 是指向地址为 `h1.taos.com:6041` 的 URL,并将默认使用的数据库库名设置为 `test`。 HTTP 请求的 Header 里需带有身份认证信息,TDengine 支持 Basic 认证与自定义认证两种机制,后续版本将提供标准安全的数字签名机制来做身份验证。 -- 自定义身份认证信息如下所示(token 稍后介绍) +- [自定义身份认证信息](#自定义授权码)如下所示 - ``` + ```text Authorization: Taosd ``` - Basic 身份认证信息如下所示 - ``` + ```text Authorization: Basic ``` @@ -119,41 +269,165 @@ curl -L -u username:password -d "" :/rest/sql/[db_name] ## HTTP 返回格式 -返回值为 JSON 格式,如下: +### HTTP 响应码 + +| **response code** | **说明** | +|-------------------|----------------| +| 200 | 正确返回和 C 接口错误返回 | +| 400 | 参数错误返回 | +| 401 | 鉴权失败 | +| 404 | 接口不存在 | +| 500 | 内部错误 | +| 503 | 系统资源不足 | + +### HTTP body 结构 + + + + + + + + + + + + + + + + + + + + + + +
+<table>
+<tr>
+<th>执行结果</th>
+<th>说明</th>
+<th>样例</th>
+</tr>
+<tr>
+<td>正确执行</td>
+<td>
+code:(int)0 代表成功
+<br/>
+<br/>
+column_meta:([][3]any)列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)
+<br/>
+<br/>
+rows:(int)数据返回行数
+<br/>
+<br/>
+data:([][]any)具体数据内容
+</td>
+<td>
+
 ```json
 {
-    "status": "succ",
-    "head": ["ts","current", …],
-    "column_meta": [["ts",9,8],["current",6,4], …],
-    "data": [
-        ["2018-10-03 14:38:05.000", 10.3, …],
-        ["2018-10-03 14:38:15.000", 12.6, …]
-    ],
-    "rows": 2
+    "code": 0,
+    "column_meta": [["affected_rows", "INT", 4]],
+    "data": [[0]],
+    "rows": 1
 }
 ```
 
-说明:
+</td>
+</tr>
+<tr>
+<td>正确查询</td>
+<td>
+code:(int)0 代表成功
+<br/>
+<br/>
+column_meta:([][3]any) 列信息,每个列会用三个值来说明,分别为:列名(string)、列类型(string)、类型长度(int)
+<br/>
+<br/>
+rows:(int)数据返回行数
+<br/>
+<br/>
+data:([][]any)具体数据内容
+</td>
+<td>
-- status: 告知操作结果是成功还是失败。
-- head: 表的定义,如果不返回结果集,则仅有一列 “affected_rows”。(从 2.0.17.0 版本开始,建议不要依赖 head 返回值来判断数据列类型,而推荐使用 column_meta。在后续版本中,有可能会从返回值中去掉 head 这一项。)
-- column_meta: 从 2.0.17.0 版本开始,返回值中增加这一项来说明 data 里每一列的数据类型。具体每个列会用三个值来说明,分别为:列名、列类型、类型长度。例如`["current",6,4]`表示列名为“current”;列类型为 6,也即 float 类型;类型长度为 4,也即对应 4 个字节表示的 float。如果列类型为 binary 或 nchar,则类型长度表示该列最多可以保存的内容长度,而不是本次返回值中的具体数据长度。当列类型是 nchar 的时候,其类型长度表示可以保存的 unicode 字符数量,而不是 bytes。
-- data: 具体返回的数据,一行一行的呈现,如果不返回结果集,那么就仅有 [[affected_rows]]。data 中每一行的数据列顺序,与 column_meta 中描述数据列的顺序完全一致。
-- rows: 表明总共多少行数据。
+
+```json
+{
+    "code": 0,
+    "column_meta": [
+        ["ts", "TIMESTAMP", 8],
+        ["count", "BIGINT", 8],
+        ["endpoint", "VARCHAR", 45],
+        ["status_code", "INT", 4],
+        ["client_ip", "VARCHAR", 40],
+        ["request_method", "VARCHAR", 15],
+        ["request_uri", "VARCHAR", 128]
+    ],
+    "data": [
+        [
+            "2022-06-29T05:50:55.401Z",
+            2,
+            "LAPTOP-NNKFTLTG:6041",
+            200,
+            "172.23.208.1",
+            "POST",
+            "/rest/sql"
+        ],
+        [
+            "2022-06-29T05:52:16.603Z",
+            1,
+            "LAPTOP-NNKFTLTG:6041",
+            200,
+            "172.23.208.1",
+            "POST",
+            "/rest/sql"
+        ],
+        [
+            "2022-06-29T06:28:14.118Z",
+            1,
+            "LAPTOP-NNKFTLTG:6041",
+            200,
+            "172.23.208.1",
+            "POST",
+            "/rest/sql"
+        ],
+        [
+            "2022-06-29T05:52:16.603Z",
+            2,
+            "LAPTOP-NNKFTLTG:6041",
+            401,
+            "172.23.208.1",
+            "POST",
+            "/rest/sql"
+        ]
+    ],
+    "rows": 4
+}
+```
+
+</td>
+</tr>
+<tr>
+<td>错误</td>
+<td>
+code:(int)错误码
+<br/>
+<br/>
+desc:(string)错误描述
+</td>
+<td>
-column_meta 中的列类型说明:
-
-- 1:BOOL
-- 2:TINYINT
-- 3:SMALLINT
-- 4:INT
-- 5:BIGINT
-- 6:FLOAT
-- 7:DOUBLE
-- 8:BINARY
-- 9:TIMESTAMP
-- 10:NCHAR
+
+```json
+{
+    "code": 9728,
+    "desc": "syntax error near \"1\""
+}
+```
+
+</td>
+</tr>
+</table>
+ +### 说明 + +- 时间格式仅支持 RFC3339,结果集为 0 时区 +- 列类型使用如下字符串: + > "NULL" + > "BOOL" + > "TINYINT" + > "SMALLINT" + > "INT" + > "BIGINT" + > "FLOAT" + > "DOUBLE" + > "VARCHAR" + > "TIMESTAMP" + > "NCHAR" + > "TINYINT UNSIGNED" + > "SMALLINT UNSIGNED" + > "INT UNSIGNED" + > "BIGINT UNSIGNED" + > "JSON" ## 自定义授权码 @@ -199,19 +473,44 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata ```json { - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03 14:38:05.000", 10.3, 219, 0.31], - ["2018-10-03 14:38:15.000", 12.6, 218, 0.33] - ], - "rows": 2 + "code": 0, + "column_meta": [ + [ + "ts", + "TIMESTAMP", + 8 + ], + [ + "current", + "FLOAT", + 4 + ], + [ + "voltage", + "INT", + 4 + ], + [ + "phase", + "FLOAT", + 4 + ] + ], + "data": [ + [ + "2022-07-30T06:44:40.32Z", + 10.3, + 219, + 0.31 + ], + [ + "2022-07-30T06:44:41.32Z", + 12.6, + 218, + 0.33 + ] + ], + "rows": 2 } ``` @@ -225,83 +524,23 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata ```json { - "status": "succ", - "head": ["affected_rows"], - "column_meta": [["affected_rows", 4, 4]], - "data": [[1]], - "rows": 1 + "code": 0, + "column_meta": [ + [ + "affected_rows", + "INT", + 4 + ] + ], + "data": [ + [ + 0 + ] + ], + "rows": 1 } ``` -## 其他用法 - -### 结果集采用 Unix 时间戳 - -HTTP 请求 URL 采用 `/rest/sqlt` 时,返回结果集的时间戳将采用 Unix 时间戳格式表示,例如 - -```bash -curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt -``` - -返回结果: +## 参考 -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - [1538548685000, 10.3, 219, 0.31], - [1538548695000, 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -### 结果集采用 UTC 时间字符串 - -HTTP 请求 URL 采用 `/rest/sqlutc` 时,返回结果集的时间戳将采用 UTC 时间字符串表示,例如 - -```bash - curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc -``` - -返回值: - -```json -{ - "status": "succ", - "head": ["ts", "current", "voltage", "phase"], - "column_meta": [ - ["ts", 9, 8], - ["current", 6, 4], - ["voltage", 4, 4], - ["phase", 6, 4] - ], - "data": [ - ["2018-10-03T14:38:05.000+0800", 10.3, 219, 0.31], - ["2018-10-03T14:38:15.000+0800", 12.6, 218, 0.33] - ], - "rows": 2 -} -``` - -## 重要配置项 - -下面仅列出一些与 RESTful 接口有关的配置参数,其他系统参数请看配置文件里的说明。 - -- 对外提供 RESTful 服务的端口号,默认绑定到 6041(实际取值是 serverPort + 11,因此可以通过修改 serverPort 参数的设置来修改)。 -- httpMaxThreads: 启动的线程数量,默认为 2(2.0.17.0 版本开始,默认值改为 CPU 核数的一半向下取整)。 -- restfulRowLimit: 返回结果集(JSON 格式)的最大条数,默认值为 10240。 -- httpEnableCompress: 是否支持压缩,默认不支持,目前 TDengine 仅支持 gzip 压缩格式。 -- httpDebugFlag: 日志开关,默认 131。131:仅错误和报警信息,135:调试信息,143:非常详细的调试信息。 -- httpDbNameMandatory: 是否必须在 RESTful URL 中指定默认的数据库名。默认为 0,即关闭此检查。如果设置为 1,那么每个 RESTful URL 中都必须设置一个默认数据库名,否则无论此时执行的 SQL 语句是否需要指定数据库,都会返回一个执行错误,拒绝执行此 SQL 语句。 - -:::note -如果使用 taosd 提供的 REST API, 那么以上配置需要写在 taosd 的配置文件 taos.cfg 中。如果使用 taosAdapter 提供的 REST API, 那么需要参考 taosAdapter [对应的配置方法](/reference/taosadapter/)。 -::: +[taosAdapter](/reference/taosadapter/) diff --git a/docs/zh/14-reference/03-connector/go.mdx b/docs/zh/14-reference/03-connector/go.mdx index 88b09aa5d0b0161973e3e7eabb4cf04357c134f3..a87c948d4a3c0e0764e6c4823608bf7d8b171f24 100644 --- a/docs/zh/14-reference/03-connector/go.mdx +++ b/docs/zh/14-reference/03-connector/go.mdx @@ -65,7 +65,7 @@ REST 连接支持所有能运行 Go 的平台。 ### 使用 go get 安装 -`go get -u 
github.com/taosdata/driver-go/v2@develop` +`go get -u github.com/taosdata/driver-go/v3@latest` ### 使用 go mod 管理 @@ -80,7 +80,7 @@ REST 连接支持所有能运行 Go 的平台。 ```go import ( "database/sql" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) ``` @@ -132,7 +132,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosSql" + _ "github.com/taosdata/driver-go/v3/taosSql" ) func main() { @@ -164,7 +164,7 @@ import ( "database/sql" "fmt" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { @@ -205,14 +205,14 @@ func main() { ### 更多示例程序 -* [示例程序](https://github.com/taosdata/TDengine/tree/develop/examples/go) +* [示例程序](https://github.com/taosdata/driver-go/tree/3.0/examples) * [视频教程](https://www.taosdata.com/blog/2020/11/11/1951.html)。 ## 使用限制 由于 REST 接口无状态所以 `use db` 语法不会生效,需要将 db 名称放到 SQL 语句中,如:`create table if not exists tb1 (ts timestamp, a int)`改为`create table if not exists test.tb1 (ts timestamp, a int)`否则将报错`[0x217] Database not specified or available`。 -也可以将 db 名称放到 DSN 中,将 `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`,此方法在 TDengine 2.4.0.5 版本的 taosAdapter 开始支持。当指定的 db 不存在时执行 `create database` 语句不会报错,而执行针对该 db 的其他查询或写入操作会报错。 +也可以将 db 名称放到 DSN 中,将 `root:taosdata@http(localhost:6041)/` 改为 `root:taosdata@http(localhost:6041)/test`。当指定的 db 不存在时执行 `create database` 语句不会报错,而执行针对该 db 的其他查询或写入操作会报错。 完整示例如下: @@ -224,7 +224,7 @@ import ( "fmt" "time" - _ "github.com/taosdata/driver-go/v2/taosRestful" + _ "github.com/taosdata/driver-go/v3/taosRestful" ) func main() { @@ -266,35 +266,27 @@ func main() { ## 常见问题 -1. 无法找到包 `github.com/taosdata/driver-go/v2/taosRestful` - - 将 `go.mod` 中 require 块对`github.com/taosdata/driver-go/v2`的引用改为`github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`。 - -2. database/sql 中 stmt(参数绑定)相关接口崩溃 +1. database/sql 中 stmt(参数绑定)相关接口崩溃 REST 不支持参数绑定相关接口,建议使用`db.Exec`和`db.Query`。 -3. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available` +2. 使用 `use db` 语句后执行其他语句报错 `[0x217] Database not specified or available` 在 REST 接口中 SQL 语句的执行无上下文关联,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 -4. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available` +3. 使用 taosSql 不报错使用 taosRestful 报错 `[0x217] Database not specified or available` 因为 REST 接口无状态,使用 `use db` 语句不会生效,解决办法见上方使用限制章节。 -5. 升级 `github.com/taosdata/driver-go/v2/taosRestful` - - 将 `go.mod` 文件中对 `github.com/taosdata/driver-go/v2` 的引用改为 `github.com/taosdata/driver-go/v2 develop`,之后执行 `go mod tidy`。 - -6. `readBufferSize` 参数调大后无明显效果 +4. `readBufferSize` 参数调大后无明显效果 `readBufferSize` 调大后会减少获取结果时 `syscall` 的调用。如果查询结果的数据量不大,修改该参数不会带来明显提升,如果该参数修改过大,瓶颈会在解析 JSON 数据。如果需要优化查询速度,需要根据实际情况调整该值来达到查询效果最优。 -7. `disableCompression` 参数设置为 `false` 时查询效率降低 +5. `disableCompression` 参数设置为 `false` 时查询效率降低 当 `disableCompression` 参数设置为 `false` 时查询结果会使用 `gzip` 压缩后传输,拿到数据后要先进行 `gzip` 解压。 -8. `go get` 命令无法获取包,或者获取包超时 +6. 
`go get` 命令无法获取包,或者获取包超时 设置 Go 代理 `go env -w GOPROXY=https://goproxy.cn,direct`。 @@ -334,17 +326,33 @@ func main() { #### 订阅 -* `func (conn *Connector) Subscribe(restart bool, topic string, sql string, interval time.Duration) (Subscriber, error)` +* `func NewConsumer(conf *Config) (*Consumer, error)` + +创建消费者。 + +* `func (c *Consumer) Subscribe(topics []string) error` + +订阅主题。 - 订阅数据。 +* `func (c *Consumer) Poll(timeout time.Duration) (*Result, error)` -* `func (s *taosSubscriber) Consume() (driver.Rows, error)` +轮询消息。 - 消费订阅数据,返回 `database/sql/driver` 包的 `Rows` 结构。 +* `func (c *Consumer) Commit(ctx context.Context, message unsafe.Pointer) error` -* `func (s *taosSubscriber) Unsubscribe(keepProgress bool)` +提交消息。 - 取消订阅数据。 +* `func (c *Consumer) FreeMessage(message unsafe.Pointer)` + +释放消息。 + +* `func (c *Consumer) Unsubscribe() error` + +取消订阅。 + +* `func (c *Consumer) Close() error` + +关闭消费者。 #### schemaless @@ -366,10 +374,6 @@ func main() { 参数绑定单行插入。 -* `func (conn *Connector) StmtQuery(sql string, params *param.Param) (rows driver.Rows, err error)` - - 参数绑定查询,返回 `database/sql/driver` 包的 `Rows` 结构。 - * `func (conn *Connector) InsertStmt() *insertstmt.InsertStmt` 初始化参数。 @@ -408,4 +412,4 @@ func main() { ## API 参考 -全部 API 见 [driver-go 文档](https://pkg.go.dev/github.com/taosdata/driver-go/v2) +全部 API 见 [driver-go 文档](https://pkg.go.dev/github.com/taosdata/driver-go/v3) diff --git a/docs/zh/14-reference/04-taosadapter.md b/docs/zh/14-reference/04-taosadapter.md index 42bc51a6d3609392537b272d13c414c9382fb9ea..9baafb9b9582445280d5c73c891694e2134d15fb 100644 --- a/docs/zh/14-reference/04-taosadapter.md +++ b/docs/zh/14-reference/04-taosadapter.md @@ -30,7 +30,7 @@ taosAdapter 提供以下功能: ### 安装 taosAdapter -taosAdapter 从 TDengine v2.4.0.0 版本开始成为 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server(taosAdapter 包含在 v2.4.0.0 及以上版本)安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/develop/BUILD-CN.md)文档。 +taosAdapter 从 TDengine v2.4.0.0 版本开始成为 TDengine 服务端软件 的一部分,如果您使用 TDengine server 您不需要任何额外的步骤来安装 taosAdapter。您可以从[涛思数据官方网站](https://taosdata.com/cn/all-downloads/)下载 TDengine server(taosAdapter 包含在 v2.4.0.0 及以上版本)安装包。如果需要将 taosAdapter 分离部署在 TDengine server 之外的服务器上,则应该在该服务器上安装完整的 TDengine 来安装 taosAdapter。如果您需要使用源代码编译生成 taosAdapter,您可以参考[构建 taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD-CN.md)文档。 ### start/stop taosAdapter @@ -69,20 +69,23 @@ Usage of taosAdapter: --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" --help Print this help message and exit --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) + --log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL" --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") --log.rotationCount uint log rotation count. Env "TAOS_ADAPTER_LOG_ROTATION_COUNT" (default 30) --log.rotationSize string log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_ROTATION_SIZE" (default "1GB") --log.rotationTime duration log rotation time. Env "TAOS_ADAPTER_LOG_ROTATION_TIME" (default 24h0m0s) + --log.sqlRotationCount uint record sql log rotation count. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_COUNT" (default 2) + --log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. 
Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB") + --log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s) --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") --monitor.collectDuration duration Set monitor duration. Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s) --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY" --monitor.incgroup Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP" - --monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") - --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) + --monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) --monitor.user string TDengine user. Env "TAOS_MONITOR_USER" (default "root") --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s) - --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" (default true) + --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") @@ -98,8 +101,10 @@ Usage of taosAdapter: --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) + --opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1) --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) --opentsdb_telnet.enable enable opentsdb telnet,warning: without auth info(default false). Env "TAOS_ADAPTER_OPENTSDB_TELNET_ENABLE" + --opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL" --opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250) --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) @@ -111,9 +116,6 @@ Usage of taosAdapter: -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). 
Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) - --ssl.certFile string ssl cert file path. Env "TAOS_ADAPTER_SSL_CERT_FILE" - --ssl.enable enable ssl. Env "TAOS_ADAPTER_SSL_ENABLE" - --ssl.keyFile string ssl key file path. Env "TAOS_ADAPTER_SSL_KEY_FILE" --statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000) --statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd") --statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true) @@ -149,12 +151,12 @@ AllowWebSockets 关于 CORS 协议细节请参考:[https://www.w3.org/wiki/CORS_Enabled](https://www.w3.org/wiki/CORS_Enabled) 或 [https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS](https://developer.mozilla.org/zh-CN/docs/Web/HTTP/CORS)。 -示例配置文件参见 [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/develop/example/config/taosadapter.toml)。 +示例配置文件参见 [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/3.0/example/config/taosadapter.toml)。 ## 功能列表 -- 与 RESTful 接口兼容 - [https://www.taosdata.com/cn/documentation/connector#restful](https://www.taosdata.com/cn/documentation/connector#restful) +- RESTful 接口 + [https://docs.taosdata.com/reference/rest-api/](https://docs.taosdata.com/reference/rest-api/) - 兼容 InfluxDB v1 写接口 [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/) - 兼容 OpenTSDB JSON 和 telnet 格式写入 @@ -167,7 +169,7 @@ AllowWebSockets - 与 icinga2 的无缝连接 icinga2 是一个收集检查结果指标和性能数据的软件。请访问 [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) 了解更多信息。 - 与 tcollector 无缝连接 - TCollector 是一个客户端进程,从本地收集器收集数据,并将数据推送到 OpenTSDB。请访问 [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) 了解更多信息。 + TCollector是一个客户端进程,从本地收集器收集数据,并将数据推送到 OpenTSDB。请访问 [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) 了解更多信息。 - 无缝连接 node_exporter node_export 是一个机器指标的导出器。请访问 [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) 了解更多信息。 - 支持 Prometheus remote_read 和 remote_write @@ -177,13 +179,7 @@ AllowWebSockets ### TDengine RESTful 接口 -您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](/reference/connector#restful)。支持如下 EndPoint : - -```text -/rest/sql -/rest/sqlt -/rest/sqlutc -``` +您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](/reference/rest-api/)。 ### InfluxDB @@ -229,7 +225,7 @@ AllowWebSockets ### node_exporter -Prometheus 使用的由\*NIX 内核暴露的硬件和操作系统指标的输出器 +Prometheus 使用的由 \*NIX 内核暴露的硬件和操作系统指标的输出器 - 启用 taosAdapter 的配置 node_exporter.enable - 设置 node_exporter 的相关配置 @@ -297,15 +293,15 @@ taosAdapter 支持将 http 监控、cpu 百分比和内存百分比写入 TDengi 有关配置参数 -| **配置项** | **描述** | **默认值** | -| ----------------------- | --------------------------------------------------------- | ---------- | -| monitor.collectDuration | cpu 和内存采集间隔 | 3s | -| monitor.identity | 当前 taosadapter 的标识符如果不设置将使用 'hostname:port' | | -| monitor.incgroup | 是否是 cgroup 中运行(容器中运行设置为 true) | false | -| monitor.writeToTD | 是否写入到 TDengine | true | -| monitor.user | TDengine 连接用户名 | root | -| monitor.password 
| TDengine 连接密码 | taosdata | -| monitor.writeInterval | 写入 TDengine 间隔 | 30s | +| **配置项** | **描述** | **默认值** | +|-------------------------|--------------------------------------------|----------| +| monitor.collectDuration | cpu 和内存采集间隔 | 3s | +| monitor.identity | 当前taosadapter 的标识符如果不设置将使用 'hostname:port' | | +| monitor.incgroup | 是否是 cgroup 中运行(容器中运行设置为 true) | false | +| monitor.writeToTD | 是否写入到 TDengine | false | +| monitor.user | TDengine 连接用户名 | root | +| monitor.password | TDengine 连接密码 | taosdata | +| monitor.writeInterval | 写入TDengine 间隔 | 30s | ## 结果返回条数限制 @@ -314,8 +310,6 @@ taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1 该参数控制以下接口返回 - `http://:6041/rest/sql` -- `http://:6041/rest/sqlt` -- `http://:6041/rest/sqlutc` - `http://:6041/prometheus/v1/remote_read/:db` ## 故障解决 @@ -328,11 +322,11 @@ taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1 在 TDengine server 2.2.x.x 或更早期版本中,taosd 进程包含一个内嵌的 http 服务。如前面所述,taosAdapter 是一个使用 systemd 管理的独立软件,拥有自己的进程。并且两者有一些配置参数和行为是不同的,请见下表: -| **#** | **embedded httpd** | **taosAdapter** | **comment** | -| ----- | ------------------- | ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------ | -| 1 | httpEnableRecordSql | --logLevel=debug | | -| 2 | httpMaxThreads | n/a | taosAdapter 自动管理线程池,无需此参数 | -| 3 | telegrafUseFieldNum | 请参考 taosAdapter telegraf 配置方法 | | -| 4 | restfulRowLimit | restfulRowLimit | 内嵌 httpd 默认输出 10240 行数据,最大允许值为 102400。taosAdapter 也提供 restfulRowLimit 但是默认不做限制。您可以根据实际场景需求进行配置 | -| 5 | httpDebugFlag | 不适用 | httpdDebugFlag 对 taosAdapter 不起作用 | -| 6 | httpDBNameMandatory | 不适用 | taosAdapter 要求 URL 中必须指定数据库名 | +| **#** | **embedded httpd** | **taosAdapter** | **comment** | +|-------|---------------------|-------------------------------|------------------------------------------------------------------------------------------------| +| 1 | httpEnableRecordSql | --logLevel=debug | | +| 2 | httpMaxThreads | n/a | taosAdapter 自动管理线程池,无需此参数 | +| 3 | telegrafUseFieldNum | 请参考 taosAdapter telegraf 配置方法 | +| 4 | restfulRowLimit | restfulRowLimit | 内嵌 httpd 默认输出 10240 行数据,最大允许值为 102400。taosAdapter 也提供 restfulRowLimit 但是默认不做限制。您可以根据实际场景需求进行配置 | +| 5 | httpDebugFlag | 不适用 | httpdDebugFlag 对 taosAdapter 不起作用 | +| 6 | httpDBNameMandatory | 不适用 | taosAdapter 要求 URL 中必须指定数据库名 | \ No newline at end of file diff --git a/examples/c/tmq.c b/examples/c/tmq.c index fd1f146618874132738a675714841e966353311a..3686251b4b17dcef0553e912a7babb04404461e9 100644 --- a/examples/c/tmq.c +++ b/examples/c/tmq.c @@ -29,7 +29,7 @@ static void msg_process(TAOS_RES* msg) { printf("vg: %d\n", tmq_get_vgroup_id(msg)); if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) { tmq_raw_data raw = {0}; - int32_t code = tmq_get_raw(msg, &raw); + int32_t code = tmq_get_raw(msg, &raw); if (code == 0) { TAOS* pConn = taos_connect("192.168.1.86", "root", "taosdata", NULL, 0); if (pConn == NULL) { @@ -302,7 +302,7 @@ int32_t create_topic() { } taos_free_result(pRes); -// pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1"); + // pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1"); pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1, c2, c3 from st1"); if (taos_errno(pRes) != 0) { printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes)); diff --git a/examples/rust b/examples/rust deleted file mode 160000 index 
7ed7a97715388fa144718764d6bf20f9bfc29a12..0000000000000000000000000000000000000000 --- a/examples/rust +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 7ed7a97715388fa144718764d6bf20f9bfc29a12 diff --git a/include/common/tcommon.h b/include/common/tcommon.h index c5404085bbb56d01b39866e515bf743349ced983..4eea744be19b51ebb321629c92c8d7e1f515fcda 100644 --- a/include/common/tcommon.h +++ b/include/common/tcommon.h @@ -56,7 +56,6 @@ enum { STREAM_INPUT__DATA_SUBMIT = 1, STREAM_INPUT__DATA_BLOCK, STREAM_INPUT__MERGED_SUBMIT, - // STREAM_INPUT__TABLE_SCAN, STREAM_INPUT__TQ_SCAN, STREAM_INPUT__DATA_RETRIEVE, STREAM_INPUT__GET_RES, @@ -154,7 +153,7 @@ typedef struct SQueryTableDataCond { int32_t order; // desc|asc order to iterate the data block int32_t numOfCols; SColumnInfo* colList; - int32_t type; // data block load type: + int32_t type; // data block load type: STimeWindow twindows; int64_t startVersion; int64_t endVersion; diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 57d1199e17cd701696054b7d962de3bef9bba8c8..9111728e1ad15d7cfc105a5a65ee8364f7ab2f95 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -146,6 +146,7 @@ struct SConfig *taosGetCfg(); void taosSetAllDebugFlag(int32_t flag); void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal); int32_t taosSetCfg(SConfig *pCfg, char *name); +void taosLocalCfgForbiddenToChange(char* name, bool* forbidden); #ifdef __cplusplus } diff --git a/include/common/ttypes.h b/include/common/ttypes.h index ec70dffd342ea84349394db55377cf498f1f53b6..4b3e11f94701052b3b2fa99984a7376e674c297d 100644 --- a/include/common/ttypes.h +++ b/include/common/ttypes.h @@ -143,84 +143,84 @@ typedef struct { } \ } while (0) -#define SET_TYPED_DATA_MIN(_v, _type) \ - do { \ - switch (_type) { \ - case TSDB_DATA_TYPE_BOOL: \ - case TSDB_DATA_TYPE_TINYINT: \ - *(int8_t *)(_v) = INT8_MIN; \ - break; \ - case TSDB_DATA_TYPE_SMALLINT: \ - *(int16_t *)(_v) = INT16_MIN; \ - break; \ - case TSDB_DATA_TYPE_INT: \ - *(int32_t *)(_v) = INT32_MIN; \ - break; \ - case TSDB_DATA_TYPE_BIGINT: \ - case TSDB_DATA_TYPE_TIMESTAMP: \ - *(int64_t *)(_v) = INT64_MIN; \ - break; \ - case TSDB_DATA_TYPE_FLOAT: \ - *(float *)(_v) = FLT_MIN; \ - break; \ - case TSDB_DATA_TYPE_DOUBLE: \ - *(double *)(_v) = DBL_MIN; \ - break; \ - case TSDB_DATA_TYPE_UTINYINT: \ - *(uint8_t *)(_v) = 0; \ - break; \ - case TSDB_DATA_TYPE_USMALLINT: \ - *(uint16_t *)(_v) = 0; \ - break; \ - case TSDB_DATA_TYPE_UBIGINT: \ - *(uint64_t *)(_v) = 0; \ - break; \ - case TSDB_DATA_TYPE_UINT: \ - *(uint32_t *)(_v) = 0; \ - break; \ - default: \ - break; \ - } \ +#define SET_TYPED_DATA_MIN(_v, _type) \ + do { \ + switch (_type) { \ + case TSDB_DATA_TYPE_BOOL: \ + case TSDB_DATA_TYPE_TINYINT: \ + *(int8_t *)(_v) = INT8_MIN; \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ + *(int16_t *)(_v) = INT16_MIN; \ + break; \ + case TSDB_DATA_TYPE_INT: \ + *(int32_t *)(_v) = INT32_MIN; \ + break; \ + case TSDB_DATA_TYPE_BIGINT: \ + case TSDB_DATA_TYPE_TIMESTAMP: \ + *(int64_t *)(_v) = INT64_MIN; \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ + *(float *)(_v) = FLT_MIN; \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ + *(double *)(_v) = DBL_MIN; \ + break; \ + case TSDB_DATA_TYPE_UTINYINT: \ + *(uint8_t *)(_v) = 0; \ + break; \ + case TSDB_DATA_TYPE_USMALLINT: \ + *(uint16_t *)(_v) = 0; \ + break; \ + case TSDB_DATA_TYPE_UBIGINT: \ + *(uint64_t *)(_v) = 0; \ + break; \ + case TSDB_DATA_TYPE_UINT: \ + *(uint32_t *)(_v) = 0; \ + break; \ + default: \ + break; \ + } \ } while (0) 
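The hunks on either side of this point only re-indent `SET_TYPED_DATA_MIN` (above) and `SET_TYPED_DATA_MAX` (below); no case is added or removed. For readers unfamiliar with the pattern, here is a minimal standalone sketch of the same switch-on-type-id dispatch; the `DEMO_*` names and type ids are stand-ins for this illustration only, not TDengine identifiers:

```c
#include <float.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in type ids for this sketch; the real TSDB_DATA_TYPE_* ids live in
 * the TDengine headers. */
enum { DEMO_TYPE_INT = 1, DEMO_TYPE_DOUBLE = 2 };

/* Two-case miniature of SET_TYPED_DATA_MIN: the caller hands in an untyped
 * output slot plus a runtime type id, and the macro writes that type's
 * minimum through the appropriately cast pointer. */
#define DEMO_SET_TYPED_DATA_MIN(_v, _type) \
  do {                                     \
    switch (_type) {                       \
      case DEMO_TYPE_INT:                  \
        *(int32_t *)(_v) = INT32_MIN;      \
        break;                             \
      case DEMO_TYPE_DOUBLE:               \
        *(double *)(_v) = DBL_MIN;         \
        break;                             \
      default:                             \
        break;                             \
    }                                      \
  } while (0)

int main(void) {
  int32_t i32 = 0;
  double  d = 0.0;
  DEMO_SET_TYPED_DATA_MIN(&i32, DEMO_TYPE_INT);
  DEMO_SET_TYPED_DATA_MIN(&d, DEMO_TYPE_DOUBLE);
  /* DBL_MIN is the smallest positive normalized double, not the most
   * negative value; the real macro behaves the same way for floats. */
  printf("%d %.17g\n", i32, d);
  return 0;
}
```

Note that the floating-point cases inherit a `<float.h>` subtlety: `FLT_MIN` and `DBL_MIN` are the smallest positive normalized values, so `SET_TYPED_DATA_MIN` does not produce `-FLT_MAX`/`-DBL_MAX` for FLOAT/DOUBLE columns.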
-#define SET_TYPED_DATA_MAX(_v, _type) \ - do { \ - switch (_type) { \ - case TSDB_DATA_TYPE_BOOL: \ - case TSDB_DATA_TYPE_TINYINT: \ - *(int8_t *)(_v) = INT8_MAX; \ - break; \ - case TSDB_DATA_TYPE_SMALLINT: \ - *(int16_t *)(_v) = INT16_MAX; \ - break; \ - case TSDB_DATA_TYPE_INT: \ - *(int32_t *)(_v) = INT32_MAX; \ - break; \ - case TSDB_DATA_TYPE_BIGINT: \ - case TSDB_DATA_TYPE_TIMESTAMP: \ - *(int64_t *)(_v) = INT64_MAX; \ - break; \ - case TSDB_DATA_TYPE_FLOAT: \ - *(float *)(_v) = FLT_MAX; \ - break; \ - case TSDB_DATA_TYPE_DOUBLE: \ - *(double *)(_v) = DBL_MAX; \ - break; \ - case TSDB_DATA_TYPE_UTINYINT: \ - *(uint8_t *)(_v) = UINT8_MAX; \ - break; \ - case TSDB_DATA_TYPE_USMALLINT: \ - *(uint16_t *)(_v) = UINT16_MAX; \ - break; \ - case TSDB_DATA_TYPE_UINT: \ - *(uint32_t *)(_v) = UINT32_MAX; \ - break; \ - case TSDB_DATA_TYPE_UBIGINT: \ - *(uint64_t *)(_v) = UINT64_MAX; \ - break; \ - default: \ - break; \ - } \ +#define SET_TYPED_DATA_MAX(_v, _type) \ + do { \ + switch (_type) { \ + case TSDB_DATA_TYPE_BOOL: \ + case TSDB_DATA_TYPE_TINYINT: \ + *(int8_t *)(_v) = INT8_MAX; \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ + *(int16_t *)(_v) = INT16_MAX; \ + break; \ + case TSDB_DATA_TYPE_INT: \ + *(int32_t *)(_v) = INT32_MAX; \ + break; \ + case TSDB_DATA_TYPE_BIGINT: \ + case TSDB_DATA_TYPE_TIMESTAMP: \ + *(int64_t *)(_v) = INT64_MAX; \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ + *(float *)(_v) = FLT_MAX; \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ + *(double *)(_v) = DBL_MAX; \ + break; \ + case TSDB_DATA_TYPE_UTINYINT: \ + *(uint8_t *)(_v) = UINT8_MAX; \ + break; \ + case TSDB_DATA_TYPE_USMALLINT: \ + *(uint16_t *)(_v) = UINT16_MAX; \ + break; \ + case TSDB_DATA_TYPE_UINT: \ + *(uint32_t *)(_v) = UINT32_MAX; \ + break; \ + case TSDB_DATA_TYPE_UBIGINT: \ + *(uint64_t *)(_v) = UINT64_MAX; \ + break; \ + default: \ + break; \ + } \ } while (0) #define NUM_TO_STRING(_inputType, _input, _outputBytes, _output) \ @@ -260,10 +260,9 @@ typedef struct { } \ } while (0) - -//TODO: use varchar(0) to represent NULL type -#define IS_VAR_NULL_TYPE(_t, _b) ((_t) == TSDB_DATA_TYPE_VARCHAR && (_b) == 0) -#define IS_NULL_TYPE(_t) ((_t) == TSDB_DATA_TYPE_NULL) +// TODO: use varchar(0) to represent NULL type +#define IS_VAR_NULL_TYPE(_t, _b) ((_t) == TSDB_DATA_TYPE_VARCHAR && (_b) == 0) +#define IS_NULL_TYPE(_t) ((_t) == TSDB_DATA_TYPE_NULL) #define IS_SIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_TINYINT && (_t) <= TSDB_DATA_TYPE_BIGINT) #define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT) @@ -329,7 +328,7 @@ typedef struct tDataTypeDescriptor { int16_t type; int16_t nameLen; int32_t bytes; - char * name; + char *name; int64_t minValue; int64_t maxValue; int32_t (*compFunc)(const char *const input, int32_t inputSize, const int32_t nelements, char *const output, diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index 98db7be0d35b785b11f7a3e305c5e2ac2c592699..f6f2ceb9579b3f5f9e5d3355b87d6b17aa0a66bd 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -202,6 +202,7 @@ bool fmIsForbidStreamFunc(int32_t funcId); bool fmIsIntervalInterpoFunc(int32_t funcId); bool fmIsInterpFunc(int32_t funcId); bool fmIsLastRowFunc(int32_t funcId); +bool fmIsSelectValueFunc(int32_t funcId); bool fmIsSystemInfoFunc(int32_t funcId); bool fmIsImplicitTsFunc(int32_t funcId); bool fmIsClientPseudoColumnFunc(int32_t funcId); diff --git a/include/libs/nodes/cmdnodes.h 
b/include/libs/nodes/cmdnodes.h index fc1d5245063f400249b99df50b3cc4a853679982..32a53c9f4e3bda4c5e85d5cbd0039e11278acd3b 100644 --- a/include/libs/nodes/cmdnodes.h +++ b/include/libs/nodes/cmdnodes.h @@ -283,6 +283,7 @@ typedef struct SCreateIndexStmt { EIndexType indexType; bool ignoreExists; char indexName[TSDB_INDEX_NAME_LEN]; + char dbName[TSDB_DB_NAME_LEN]; char tableName[TSDB_TABLE_NAME_LEN]; SNodeList* pCols; SIndexOptions* pOptions; diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h index 5dc1e7512f7c288a119e3451a4922b5c366a6592..cd2a7d3f9fdb2c32be5677fd1caffc68ac97edf4 100644 --- a/include/libs/nodes/querynodes.h +++ b/include/libs/nodes/querynodes.h @@ -418,8 +418,6 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal); char* nodesGetFillModeString(EFillMode mode); int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc); -int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, - SNode** pOtherCond); #ifdef __cplusplus } diff --git a/include/libs/scalar/filter.h b/include/libs/scalar/filter.h index f1cd99f04a17a8b316319d96c4e49309406866e1..44064f0a9f52947bf5469e21d60ac09f87fb7eef 100644 --- a/include/libs/scalar/filter.h +++ b/include/libs/scalar/filter.h @@ -46,6 +46,10 @@ extern int32_t filterFreeNcharColumns(SFilterInfo *pFilterInfo); extern void filterFreeInfo(SFilterInfo *info); extern bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t numOfCols, int32_t numOfRows); +/* condition split interface */ +int32_t filterPartitionCond(SNode **pCondition, SNode **pPrimaryKeyCond, SNode **pTagIndexCond, SNode **pTagCond, + SNode **pOtherCond); + #ifdef __cplusplus } #endif diff --git a/include/libs/scalar/scalar.h b/include/libs/scalar/scalar.h index 770b0442ea5413399b543545369246cf5346875b..6322c6a1e969371c20efbb0e2f4b96800242ecee 100644 --- a/include/libs/scalar/scalar.h +++ b/include/libs/scalar/scalar.h @@ -25,7 +25,7 @@ extern "C" { typedef struct SFilterInfo SFilterInfo; -int32_t scalarGetOperatorResultType(SOperatorNode* pOp); +int32_t scalarGetOperatorResultType(SOperatorNode *pOp); /* pNode will be freed in API; @@ -43,7 +43,7 @@ int32_t scalarGetOperatorParamNum(EOperatorType type); int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type); int32_t vectorGetConvertType(int32_t type1, int32_t type2); -int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut, int32_t* overflow); +int32_t vectorConvertImpl(const SScalarParam *pIn, SScalarParam *pOut, int32_t *overflow); /* Math functions */ int32_t absFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); @@ -86,7 +86,7 @@ int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutpu int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); -bool getTimePseudoFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv); +bool getTimePseudoFuncEnv(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv); int32_t winStartTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t winEndTsFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h index 6e4a8d62d0e599524d9f8aabd11985df089d3912..a4728e6382c5f6838a07c89ff349e716964a4d5a 100644 --- a/include/libs/stream/tstreamUpdate.h +++ 
b/include/libs/stream/tstreamUpdate.h @@ -34,11 +34,16 @@ typedef struct SUpdateInfo { TSKEY minTS; SScalableBf* pCloseWinSBF; SHashObj* pMap; + STimeWindow scanWindow; + uint64_t scanGroupId; + uint64_t maxVersion; } SUpdateInfo; SUpdateInfo *updateInfoInitP(SInterval* pInterval, int64_t watermark); SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark); bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts); +void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version); +bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version); void updateInfoDestroy(SUpdateInfo *pInfo); void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo); void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo); diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 14173690967ffd26be92cf13a05af6f7508533fe..de31a970dffc146dcc3b36e3933740200811af47 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -41,7 +41,7 @@ extern "C" { #define WAL_REFRESH_MS 1000 #define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12) #define WAL_FILE_LEN (WAL_PATH_LEN + 32) -#define WAL_MAGIC 0xFAFBFCFDULL +#define WAL_MAGIC 0xFAFBFCFDF4F3F2F1ULL #define WAL_SCAN_BUF_SIZE (1024 * 1024 * 3) typedef enum { @@ -203,6 +203,7 @@ SWalRef *walRefCommittedVer(SWal *); SWalRef *walOpenRef(SWal *); void walCloseRef(SWal *pWal, int64_t refId); int32_t walRefVer(SWalRef *, int64_t ver); +int32_t walPreRefVer(SWalRef *pRef, int64_t ver); void walUnrefVer(SWalRef *); // helper function for raft diff --git a/include/os/osString.h b/include/os/osString.h index 3a4ff18694489c995c8849bcd9c39a14637472c2..8eb341faa7bf61e4c2f67f8a21859da94c0dcbf4 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -62,6 +62,8 @@ typedef int32_t TdUcs4; int32_t taosUcs4len(TdUcs4 *ucs4); int64_t taosStr2int64(const char *str); +void taosConvInit(void); +void taosConvDestroy(); int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs); bool taosMbsToUcs4(const char *mbs, size_t mbs_len, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len); int32_t tasoUcs4Compare(TdUcs4 *f1_ucs4, TdUcs4 *f2_ucs4, int32_t bytes); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index da2f58307dbfc647eaf535c5677d0521fb316b4f..28c1174dfd0897b9a00a98d6cf77d06038c42630 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -258,6 +258,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_MND_SINGLE_STB_MODE_DB TAOS_DEF_ERROR_CODE(0, 0x03C5) #define TSDB_CODE_MND_INVALID_SCHEMA_VER TAOS_DEF_ERROR_CODE(0, 0x03C6) #define TSDB_CODE_MND_STABLE_UID_NOT_MATCH TAOS_DEF_ERROR_CODE(0, 0x03C7) +#define TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA TAOS_DEF_ERROR_CODE(0, 0x03C8) // mnode-trans #define TSDB_CODE_MND_TRANS_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03D0) @@ -556,6 +557,7 @@ int32_t* taosGetErrno(); #define TSDB_CODE_PAR_INVALID_SMA_INDEX TAOS_DEF_ERROR_CODE(0, 0x2660) #define TSDB_CODE_PAR_INVALID_SELECTED_EXPR TAOS_DEF_ERROR_CODE(0, 0x2661) #define TSDB_CODE_PAR_GET_META_ERROR TAOS_DEF_ERROR_CODE(0, 0x2662) +#define TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS TAOS_DEF_ERROR_CODE(0, 0x2663) #define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF) //planner diff --git a/include/util/types.h b/include/util/types.h index 1360307156ab726b274f45e18dc607798941804f..8dd0947e9c730197a6f0af391089c118ea09a494 100644 --- a/include/util/types.h +++ b/include/util/types.h @@ -81,11 +81,11 @@ static FORCE_INLINE double 
taos_align_get_double(const char *pBuf) { typedef uint16_t VarDataLenT; // maxVarDataLen: 32767 #define VARSTR_HEADER_SIZE sizeof(VarDataLenT) -#define varDataLen(v) ((VarDataLenT *)(v))[0] -#define varDataVal(v) ((char *)(v) + VARSTR_HEADER_SIZE) +#define varDataLen(v) ((VarDataLenT *)(v))[0] +#define varDataVal(v) ((char *)(v) + VARSTR_HEADER_SIZE) #define varDataTLen(v) (sizeof(VarDataLenT) + varDataLen(v)) -#define NCHAR_WIDTH_TO_BYTES(n) ((n) * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE) +#define NCHAR_WIDTH_TO_BYTES(n) ((n)*TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE) typedef int32_t VarDataOffsetT; @@ -98,4 +98,4 @@ typedef struct tstr { } #endif -#endif /*_TD_TYPES_H_*/ \ No newline at end of file +#endif /*_TD_TYPES_H_*/ diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 893aa39ce3a74db33f6520bab7455a2331c74a0e..95ce5c8338662c38e81fe4eb8d7422e05c36deba 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -316,6 +316,7 @@ void doDestroyRequest(void *p) { taosArrayDestroy(pRequest->tableList); taosArrayDestroy(pRequest->dbList); + taosArrayDestroy(pRequest->targetTableList); destroyQueryExecRes(&pRequest->body.resInfo.execRes); @@ -361,6 +362,8 @@ void taos_init_imp(void) { initQueryModuleMsgHandle(); + taosConvInit(); + rpcInit(); SCatalogCfg cfg = {.maxDBCacheNum = 100, .maxTblCacheNum = 100}; diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 3d6cf1c626c3bbbe695ea35ba6316439b7886eb3..06bd3f388741edd21e69dc5f1128937f1739feea 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -353,6 +353,7 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) { desc.subDesc = NULL; desc.subPlanNum = 0; } + desc.subPlanNum = taosArrayGetSize(desc.subDesc); ASSERT(desc.subPlanNum == taosArrayGetSize(desc.subDesc)); } else { desc.subDesc = NULL; diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index b04b4ea2aa771c63883d6668f3f7d5a9f8778add..a4eaf057d12ba3e2a57190de0f76a9489ae8fc61 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -75,6 +75,8 @@ void taos_cleanup(void) { cleanupTaskQueue(); + taosConvDestroy(); + tscInfo("all local resources released"); taosCleanupCfg(); taosCloseLog(); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 8d3fbd87922e08802ce43b04c97badb966f895cb..8d15caafb51f2969be8569d99c0c92a0ad0ea125 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1298,9 +1298,7 @@ int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) { colDataAssign(pDst, pSrc, src->info.rows, &src->info); } - dst->info.rows = src->info.rows; - dst->info.window = src->info.window; - dst->info.type = src->info.type; + dst->info = src->info; return TSDB_CODE_SUCCESS; } @@ -1724,8 +1722,9 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag) { size_t numOfCols = taosArrayGetSize(pDataBlock->pDataBlock); int32_t rows = pDataBlock->info.rows; - printf("%s |block type %d |child id %d|group id %" PRIu64 "\n", flag, (int32_t)pDataBlock->info.type, - pDataBlock->info.childId, pDataBlock->info.groupId); + printf("%s |block ver %" PRIi64 " |block type %d |child id %d|group id %" PRIu64 "\n", flag, + pDataBlock->info.version, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, + pDataBlock->info.groupId); for (int32_t j = 0; j < rows; j++) { printf("%s |", flag); for (int32_t k = 0; k < numOfCols; k++) { @@ -1774,9 +1773,9 @@ char* 
dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); int32_t rows = pDataBlock->info.rows; int32_t len = 0; - len += snprintf(dumpBuf + len, size - len, "===stream===%s |block type %d|child id %d|group id:%" PRIu64 "|uid:%ld|rows:%d\n", flag, + len += snprintf(dumpBuf + len, size - len, "===stream===%s|block type %d|child id %d|group id:%" PRIu64 "|uid:%ld|rows:%d|version:%" PRIu64 "\n", flag, (int32_t)pDataBlock->info.type, pDataBlock->info.childId, pDataBlock->info.groupId, - pDataBlock->info.uid, pDataBlock->info.rows); + pDataBlock->info.uid, pDataBlock->info.rows, pDataBlock->info.version); if (len >= size - 1) return dumpBuf; for (int32_t j = 0; j < rows; j++) { diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 0cc4e31aed97f235b38ed26f850c1e7e13276bd0..6f4a3060ed99baf08ee465bfd0c9c4f6baede9fa 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -49,7 +49,7 @@ int32_t tsNumOfShmThreads = 1; // queue & threads int32_t tsNumOfRpcThreads = 1; int32_t tsNumOfCommitThreads = 2; -int32_t tsNumOfTaskQueueThreads = 1; +int32_t tsNumOfTaskQueueThreads = 4; int32_t tsNumOfMnodeQueryThreads = 4; int32_t tsNumOfMnodeFetchThreads = 1; int32_t tsNumOfMnodeReadThreads = 1; @@ -317,9 +317,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1; if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1; - tsNumOfTaskQueueThreads = tsNumOfCores / 4; - tsNumOfTaskQueueThreads = TRANGE(tsNumOfTaskQueueThreads, 1, 2); - if (cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 1, 1024, 0) != 0) return -1; + tsNumOfTaskQueueThreads = tsNumOfCores / 2; + tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4); + if (cfgAddInt32(pCfg, "numOfTaskQueueThreads", tsNumOfTaskQueueThreads, 4, 1024, 0) != 0) return -1; return 0; } @@ -594,6 +594,20 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { return 0; } +void taosLocalCfgForbiddenToChange(char* name, bool* forbidden) { + int32_t len = strlen(name); + char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0}; + strntolower(lowcaseName, name, TMIN(CFG_NAME_MAX_LEN, len)); + + if (strcasecmp("charset", name) == 0) { + *forbidden = true; + return; + } + + *forbidden = false; +} + + int32_t taosSetCfg(SConfig *pCfg, char *name) { int32_t len = strlen(name); char lowcaseName[CFG_NAME_MAX_LEN + 1] = {0}; diff --git a/source/common/src/tname.c b/source/common/src/tname.c index b6f49a7219394c2bb1f2a9f3e7901dc5911cb4ae..887c449c56e9758412dbc201bf625f3a80281c2c 100644 --- a/source/common/src/tname.c +++ b/source/common/src/tname.c @@ -162,10 +162,7 @@ int32_t tNameGetDbName(const SName* name, char* dst) { return 0; } -const char* tNameGetDbNameP(const SName* name) { - return &name->dbname[0]; -} - +const char* tNameGetDbNameP(const SName* name) { return &name->dbname[0]; } int32_t tNameGetFullDbName(const SName* name, char* dst) { assert(name != NULL && dst != NULL); @@ -212,7 +209,6 @@ int32_t tNameAddTbName(SName* dst, const char* tbName, size_t nameLen) { return 0; } - int32_t tNameSetAcctId(SName* dst, int32_t acctId) { assert(dst != NULL); dst->acctId = acctId; @@ -340,7 +336,7 @@ void buildChildTableName(RandTableName* rName) { char temp[8] = {0}; rName->childTableName[0] = 't'; rName->childTableName[1] = '_'; - for(int i = 0; i < 16; i++){ + for (int i = 0; i < 16; i++) { sprintf(temp, "%02x", context.digest[i]); strcat(rName->childTableName, 
temp); } diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index 34c3b40556361297f8d368bf47e5ed8800bd8547..4030eaa6fe2fd32ec5718b6e4f7689f619747366 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -218,6 +218,8 @@ int mainWindows(int argc,char** argv) { taosCleanupArgs(); return -1; } + + taosConvInit(); if (global.dumpConfig) { dmDumpCfg(); diff --git a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c index c2e8a552717041798dea4f7ac61045eb198c170d..582b16ce997bc5f87d4842904cda88861e6f8137 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmMgmt.c +++ b/source/dnode/mgmt/node_mgmt/src/dmMgmt.c @@ -215,6 +215,7 @@ void dmCleanupDnode(SDnode *pDnode) { dmClearVars(pDnode); rpcCleanup(); indexCleanup(); + taosConvDestroy(); dDebug("dnode is closed, ptr:%p", pDnode); } diff --git a/source/dnode/mnode/impl/inc/mndTopic.h b/source/dnode/mnode/impl/inc/mndTopic.h index 7eb53dbdbb1897637ccc4da2d3031f1d49c00b98..c5c4800e0295fa48ee4bf9669200f7ce7a31eff8 100644 --- a/source/dnode/mnode/impl/inc/mndTopic.h +++ b/source/dnode/mnode/impl/inc/mndTopic.h @@ -37,8 +37,6 @@ const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]); int32_t mndSetTopicCommitLogs(SMnode *pMnode, STrans *pTrans, SMqTopicObj *pTopic); -int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char* stbname, int64_t suid, col_id_t colId); - #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 9684753ace7d254d19f78b3d747cbe78e4ee2518..0120e1146d99dab97946445b9b28f425e56a4090 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1639,10 +1639,10 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.walRetentionPeriod, false); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.walRollPeriod, false); + colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.walRetentionSize, false); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.walRetentionSize, false); + colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.walRollPeriod, false); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.walSegmentSize, false); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 443d70aef92f7582276cc115242176a0f6a06edd..c3296ac5c10910136a4bb279d8188336b03ce188 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -416,7 +416,7 @@ int32_t mndStart(SMnode *pMnode) { } mndSetRestore(pMnode, true); } - + grantReset(pMnode, TSDB_GRANT_ALL, 0); return mndInitTimer(pMnode); @@ -446,20 +446,6 @@ int32_t mndProcessSyncMsg(SRpcMsg *pMsg) { return -1; } - do { - char *syncNodeStr = sync2SimpleStr(pMgmt->sync); - static int64_t mndTick = 0; - if (++mndTick % 10 == 1) { - mTrace("vgId:%d, sync trace msg:%s, %s", syncGetVgId(pMgmt->sync), TMSG_INFO(pMsg->msgType), syncNodeStr); - } - if (gRaftDetailLog) { - char logBuf[512] = {0}; - snprintf(logBuf, sizeof(logBuf), "==mndProcessSyncMsg== msgType:%d, syncNode: %s", pMsg->msgType, syncNodeStr); - syncRpcMsgLog2(logBuf, pMsg); - } - taosMemoryFree(syncNodeStr); - } while (0); - // ToDo: ugly! 
use function pointer if (syncNodeStrategy(pSyncNode) == SYNC_STRATEGY_STANDARD_SNAPSHOT) { if (pMsg->msgType == TDMT_SYNC_TIMEOUT) { diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index 4b57f45631f02133726d4306539ba66f91928cc3..e55c562e38c207600956cd1eafbb88d744750f7d 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -687,6 +687,7 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl int32_t numOfRows = 0; int32_t cols = 0; SConnObj *pConn = NULL; + int32_t keepTime = tsShellActivityTimer * 3; if (pShow->pIter == NULL) { SProfileMgmt *pMgmt = &pMnode->profileMgmt; @@ -700,6 +701,10 @@ static int32_t mndRetrieveConns(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl break; } + if ((taosGetTimestampMs() - pConn->lastAccessTimeMs) > (keepTime * 1000)) { + continue; + } + cols = 0; SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c index aab0c7e81568d9498c80118a4fb59b5a64ae6f3d..1b0a2c2a131b238e7a036ce7ff731e02162346f8 100644 --- a/source/dnode/mnode/impl/src/mndStb.c +++ b/source/dnode/mnode/impl/src/mndStb.c @@ -45,7 +45,9 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq); static int32_t mndRetrieveStb(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows); static void mndCancelGetNextStb(SMnode *pMnode, void *pIter); static int32_t mndProcessTableCfgReq(SRpcMsg *pReq); -static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp, void* alterOriData, int32_t alterOriDataLen); +static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp, + void *alterOriData, int32_t alterOriDataLen); +static int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t suid, col_id_t colId); int32_t mndInitStb(SMnode *pMnode) { SSdbTable table = { @@ -409,7 +411,8 @@ static FORCE_INLINE int32_t schemaExColIdCompare(const void *colId, const void * return 0; } -static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pStb, int32_t *pContLen, void* alterOriData, int32_t alterOriDataLen) { +static void *mndBuildVCreateStbReq(SMnode *pMnode, SVgObj *pVgroup, SStbObj *pStb, int32_t *pContLen, + void *alterOriData, int32_t alterOriDataLen) { SEncoder encoder = {0}; int32_t contLen; SName name = {0}; @@ -709,7 +712,8 @@ int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreat memcpy(pDst->db, pDb->name, TSDB_DB_FNAME_LEN); pDst->createdTime = taosGetTimestampMs(); pDst->updateTime = pDst->createdTime; - pDst->uid = (pCreate->source == TD_REQ_FROM_TAOX) ? pCreate->suid : mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN); + pDst->uid = + (pCreate->source == TD_REQ_FROM_TAOX) ? 
pCreate->suid : mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN); pDst->dbUid = pDb->uid; pDst->tagVer = 1; pDst->colVer = 1; @@ -895,9 +899,9 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq pSchema->flags = pField->flags; memcpy(pSchema->name, pField->name, TSDB_COL_NAME_LEN); int32_t cIndex = mndFindSuperTableColumnIndex(pStb, pField->name); - if (cIndex >= 0){ + if (cIndex >= 0) { pSchema->colId = pStb->pColumns[cIndex].colId; - }else{ + } else { pSchema->colId = pDst->nextColId++; } } @@ -909,12 +913,11 @@ static int32_t mndBuildStbFromAlter(SStbObj *pStb, SStbObj *pDst, SMCreateStbReq pSchema->bytes = pField->bytes; memcpy(pSchema->name, pField->name, TSDB_COL_NAME_LEN); int32_t cIndex = mndFindSuperTableTagIndex(pStb, pField->name); - if (cIndex >= 0){ + if (cIndex >= 0) { pSchema->colId = pStb->pTags[cIndex].colId; - }else{ + } else { pSchema->colId = pDst->nextColId++; } - } pDst->tagVer = createReq->tagVer; pDst->colVer = createReq->colVer; @@ -982,7 +985,7 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) { } } else if (terrno != TSDB_CODE_MND_STB_NOT_EXIST) { goto _OVER; - } else if (createReq.source == TD_REQ_FROM_TAOX && (createReq.tagVer != 1 || createReq.colVer != 1)){ + } else if (createReq.source == TD_REQ_FROM_TAOX && (createReq.tagVer != 1 || createReq.colVer != 1)) { mInfo("stb:%s, alter table does not need to be done, because table is deleted", createReq.name); code = 0; goto _OVER; @@ -1009,7 +1012,7 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) { } if (isAlter) { - bool needRsp = false; + bool needRsp = false; SStbObj pDst = {0}; if (mndBuildStbFromAlter(pStb, &pDst, &createReq) != 0) { taosMemoryFreeClear(pDst.pTags); @@ -1137,6 +1140,99 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p return 0; } +int32_t mndCheckColAndTagModifiable(SMnode *pMnode, const char *stbname, int64_t suid, col_id_t colId) { + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + while (1) { + SMqTopicObj *pTopic = NULL; + pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic); + if (pIter == NULL) break; + + mDebug("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s", + pTopic->name, stbname, suid, colId, pTopic->subType, pTopic->sql); + if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) { + sdbRelease(pSdb, pTopic); + continue; + } + + SNode *pAst = NULL; + if (nodesStringToNode(pTopic->ast, &pAst) != 0) { + ASSERT(0); + return -1; + } + + SNodeList *pNodeList = NULL; + nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList); + SNode *pNode = NULL; + FOREACH(pNode, pNodeList) { + SColumnNode *pCol = (SColumnNode *)pNode; + mDebug("topic:%s, check colId:%d tableId:%" PRId64 " ctbStbUid:%" PRId64, pTopic->name, pCol->colId, + pCol->tableId, pTopic->ctbStbUid); + + if (pCol->tableId != suid && pTopic->ctbStbUid != suid) { + mDebug("topic:%s, check colId:%d passed", pTopic->name, pCol->colId); + goto NEXT; + } + if (pCol->colId > 0 && pCol->colId == colId) { + sdbRelease(pSdb, pTopic); + nodesDestroyNode(pAst); + terrno = TSDB_CODE_MND_FIELD_CONFLICT_WITH_TOPIC; + mError("topic:%s, check colId:%d conflicted", pTopic->name, pCol->colId); + return -1; + } + mDebug("topic:%s, check colId:%d passed", pTopic->name, pCol->colId); + } + + NEXT: + sdbRelease(pSdb, pTopic); + nodesDestroyNode(pAst); + } + + while (1) { + SSmaObj *pSma = NULL; + pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma); + if (pIter == NULL) break; + + 
mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name, stbname, + suid, colId, pSma->sql); + + SNode *pAst = NULL; + if (nodesStringToNode(pSma->ast, &pAst) != 0) { + terrno = TSDB_CODE_SDB_INVALID_DATA_CONTENT; + mError("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d failed since parse AST err", + pSma->name, stbname, suid, colId); + return -1; + } + + SNodeList *pNodeList = NULL; + nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList); + SNode *pNode = NULL; + FOREACH(pNode, pNodeList) { + SColumnNode *pCol = (SColumnNode *)pNode; + mDebug("tsma:%s, check colId:%d tableId:%" PRId64, pSma->name, pCol->colId, pCol->tableId); + + if ((pCol->tableId != suid) && (pSma->stbUid != suid)) { + mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId); + goto NEXT2; + } + if ((pCol->colId) > 0 && (pCol->colId == colId)) { + sdbRelease(pSdb, pSma); + nodesDestroyNode(pAst); + terrno = TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA; + mError("tsma:%s, check colId:%d conflicted", pSma->name, pCol->colId); + return -1; + } + mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId); + } + + NEXT2: + sdbRelease(pSdb, pSma); + nodesDestroyNode(pAst); + } + + return 0; +} + static int32_t mndDropSuperTableTag(SMnode *pMnode, const SStbObj *pOld, SStbObj *pNew, const char *tagName) { int32_t tag = mndFindSuperTableTagIndex(pOld, tagName); if (tag < 0) { @@ -1380,7 +1476,8 @@ static int32_t mndSetAlterStbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj * return 0; } -static int32_t mndSetAlterStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb, void* alterOriData, int32_t alterOriDataLen) { +static int32_t mndSetAlterStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb, void *alterOriData, + int32_t alterOriDataLen) { SSdb *pSdb = pMnode->pSdb; SVgObj *pVgroup = NULL; void *pIter = NULL; @@ -1607,7 +1704,8 @@ static int32_t mndBuildSMAlterStbRsp(SDbObj *pDb, SStbObj *pObj, void **pCont, i return 0; } -static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp, void* alterOriData, int32_t alterOriDataLen) { +static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp, + void *alterOriData, int32_t alterOriDataLen) { int32_t code = -1; STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq); if (pTrans == NULL) goto _OVER; @@ -2204,12 +2302,12 @@ static int32_t mndRetrieveStb(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataAppend(pColInfo, numOfRows, (const char *)maxDelay, false); - char rollup[128 + VARSTR_HEADER_SIZE] = {0}; + char rollup[128 + VARSTR_HEADER_SIZE] = {0}; int32_t rollupNum = (int32_t)taosArrayGetSize(pStb->pFuncs); for (int32_t i = 0; i < rollupNum; ++i) { char *funcName = taosArrayGet(pStb->pFuncs, i); if (i) { - strcat(varDataVal(rollup), ", "); + strcat(varDataVal(rollup), ", "); } strcat(varDataVal(rollup), funcName); } diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 7e3e5a9838839b596e5ae5bf07bd3d19a2820d3f..f343c2e055ed3f357a6a9cee6e3e1c8025d055cc 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -72,56 +72,6 @@ const char *mndTopicGetShowName(const char topic[TSDB_TOPIC_FNAME_LEN]) { return strchr(topic, '.') + 1; } -int32_t 
mndCheckColAndTagModifiable(SMnode *pMnode, const char* stbname, int64_t suid, col_id_t colId) { - SSdb *pSdb = pMnode->pSdb; - void *pIter = NULL; - while (1) { - SMqTopicObj *pTopic = NULL; - pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic); - if (pIter == NULL) break; - - mDebug("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s", - pTopic->name, stbname, suid, colId, pTopic->subType, pTopic->sql); - if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) { - sdbRelease(pSdb, pTopic); - continue; - } - - SNode *pAst = NULL; - if (nodesStringToNode(pTopic->ast, &pAst) != 0) { - ASSERT(0); - return -1; - } - - SNodeList *pNodeList = NULL; - nodesCollectColumns((SSelectStmt *)pAst, SQL_CLAUSE_FROM, NULL, COLLECT_COL_TYPE_ALL, &pNodeList); - SNode *pNode = NULL; - FOREACH(pNode, pNodeList) { - SColumnNode *pCol = (SColumnNode *)pNode; - mDebug("topic:%s, check colId:%d tableId:%" PRId64 " ctbStbUid:%" PRId64, pTopic->name, pCol->colId, pCol->tableId, pTopic->ctbStbUid); - - if (pCol->tableId != suid && pTopic->ctbStbUid != suid) { - mDebug("topic:%s, check colId:%d passed", pTopic->name, pCol->colId); - goto NEXT; - } - if (pCol->colId > 0 && pCol->colId == colId) { - sdbRelease(pSdb, pTopic); - nodesDestroyNode(pAst); - terrno = TSDB_CODE_MND_FIELD_CONFLICT_WITH_TOPIC; - mError("topic:%s, check colId:%d conflicted", pTopic->name, pCol->colId); - return -1; - } - mDebug("topic:%s, check colId:%d passed", pTopic->name, pCol->colId); - } - - NEXT: - sdbRelease(pSdb, pTopic); - nodesDestroyNode(pAst); - } - - return 0; -} - SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic) { terrno = TSDB_CODE_OUT_OF_MEMORY; diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index e9e20912c52666d71af546b53e6a946cb53b6833..cb9f3d980933b5d564d3314474ca02d7d1cdb391 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -31,6 +31,7 @@ target_sources( "src/sma/smaOpen.c" "src/sma/smaCommit.c" "src/sma/smaRollup.c" + "src/sma/smaSnapshot.c" "src/sma/smaTimeRange.c" # tsdb @@ -57,6 +58,8 @@ target_sources( "src/tq/tqPush.c" "src/tq/tqSink.c" "src/tq/tqCommit.c" + "src/tq/tqSnapshot.c" + "src/tq/tqOffsetSnapshot.c" ) target_include_directories( vnode diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 4d95a9d7a5dde52572773e8b84a579c127c28598..46519dce14efed2a64ec5b8d6355cab5364073c7 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -89,13 +89,13 @@ void metaReaderClear(SMetaReader *pReader); int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid); int32_t metaReadNext(SMetaReader *pReader); const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *tagVal); -int metaGetTableNameByUid(void* meta, uint64_t uid, char* tbName); +int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName); typedef struct SMetaFltParam { tb_uid_t suid; int16_t cid; int16_t type; - char *val; + void *val; bool reverse; int (*filterFunc)(void *a, void *b, int16_t type); @@ -137,6 +137,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader *pReader, STableBlockDistInfo *pTa int64_t tsdbGetNumOfRowsInMemTable(STsdbReader *pHandle); void *tsdbGetIdx(SMeta *pMeta); void *tsdbGetIvtIdx(SMeta *pMeta); +uint64_t getReaderMaxVersion(STsdbReader *pReader); int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader); int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const 
int32_t *slotIds, SArray *pTableUids); diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h index e08925acc398f740c1bc4bcd205e14768cdfcc98..a72546fe86026288109f692315f850d7f5852997 100644 --- a/source/dnode/vnode/src/inc/meta.h +++ b/source/dnode/vnode/src/inc/meta.h @@ -78,7 +78,11 @@ struct SMeta { TTB* pTagIdx; TTB* pTtlIdx; - TTB* pSmaIdx; + TTB* pSmaIdx; + + // stream + TTB* pStreamDb; + SMetaIdx* pIdx; }; diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index 217d40e3aa9ee990f70a9b984cb9fde3beeb089a..c825ab673101b22cab9b36df479f41fac0120200 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -209,6 +209,9 @@ int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, // smaFileUtil ================ +typedef struct SQTaskFReader SQTaskFReader; +typedef struct SQTaskFWriter SQTaskFWriter; + #define TD_FILE_HEAD_SIZE 512 typedef struct STFInfo STFInfo; diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h index 262300a3e73f9f4f879f5f1e9e9304ce5b180aef..44b9d1f69cd362accc132fd887283ba69077cd96 100644 --- a/source/dnode/vnode/src/inc/tq.h +++ b/source/dnode/vnode/src/inc/tq.h @@ -133,6 +133,9 @@ typedef struct { static STqMgmt tqMgmt = {0}; +int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle); +int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle); + // tqRead int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* offset); int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum); @@ -146,6 +149,7 @@ int32_t tqMetaOpen(STQ* pTq); int32_t tqMetaClose(STQ* pTq); int32_t tqMetaSaveHandle(STQ* pTq, const char* key, const STqHandle* pHandle); int32_t tqMetaDeleteHandle(STQ* pTq, const char* key); +int32_t tqMetaRestoreHandle(STQ* pTq); typedef struct { int32_t size; @@ -156,11 +160,15 @@ void tqOffsetClose(STqOffsetStore*); STqOffset* tqOffsetRead(STqOffsetStore* pStore, const char* subscribeKey); int32_t tqOffsetWrite(STqOffsetStore* pStore, const STqOffset* pOffset); int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey); -int32_t tqOffsetSnapshot(STqOffsetStore* pStore); +int32_t tqOffsetCommitFile(STqOffsetStore* pStore); // tqSink void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data); +// tqOffset +char* tqOffsetBuildFName(const char* path, int32_t ver); +int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname); + static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) { pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA; pOffsetVal->uid = uid; diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index d8c84e952b43a5c0f0cfa64058d8f4a6fb77ca3e..f1e980c026fc55249171ec08bec01227bb8b132b 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -43,6 +43,7 @@ typedef struct STbDataIter STbDataIter; typedef struct SMapData SMapData; typedef struct SBlockIdx SBlockIdx; typedef struct SBlock SBlock; +typedef struct SBlockL SBlockL; typedef struct SColData SColData; typedef struct SBlockDataHdr SBlockDataHdr; typedef struct SBlockData SBlockData; @@ -90,6 +91,9 @@ int32_t tsdbRowCmprFn(const void *p1, const void *p2); void tRowIterInit(SRowIter *pIter, TSDBROW *pRow, STSchema *pTSchema); SColVal *tRowIterNext(SRowIter *pIter); // SRowMerger +int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema 
*pResTSchema, TSDBROW *pRow, STSchema *pTSchema); +int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema); + int32_t tRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema); void tRowMergerClear(SRowMerger *pMerger); int32_t tRowMerge(SRowMerger *pMerger, TSDBROW *pRow); @@ -97,7 +101,6 @@ int32_t tRowMergerGetRow(SRowMerger *pMerger, STSRow **ppRow); // TABLEID int32_t tTABLEIDCmprFn(const void *p1, const void *p2); // TSDBKEY -int32_t tsdbKeyCmprFn(const void *p1, const void *p2); #define MIN_TSDBKEY(KEY1, KEY2) ((tsdbKeyCmprFn(&(KEY1), &(KEY2)) < 0) ? (KEY1) : (KEY2)) #define MAX_TSDBKEY(KEY1, KEY2) ((tsdbKeyCmprFn(&(KEY1), &(KEY2)) > 0) ? (KEY1) : (KEY2)) // SBlockCol @@ -412,6 +415,29 @@ struct SBlock { SSubBlock aSubBlock[TSDB_MAX_SUBBLOCKS]; }; +struct SBlockL { + struct { + int64_t uid; + int64_t version; + TSKEY ts; + } minKey; + struct { + int64_t uid; + int64_t version; + TSKEY ts; + } maxKey; + int64_t minVer; + int64_t maxVer; + int32_t nRow; + int8_t cmprAlg; + int64_t offset; + int32_t szBlock; + int32_t szBlockCol; + int32_t szUid; + int32_t szVer; + int32_t szTSKEY; +}; + struct SColData { int16_t cid; int8_t type; @@ -558,6 +584,26 @@ struct STsdbReadSnap { STsdbFS fs; }; +// ========== inline functions ========== +static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) { + TSDBKEY *pKey1 = (TSDBKEY *)p1; + TSDBKEY *pKey2 = (TSDBKEY *)p2; + + if (pKey1->ts < pKey2->ts) { + return -1; + } else if (pKey1->ts > pKey2->ts) { + return 1; + } + + if (pKey1->version < pKey2->version) { + return -1; + } else if (pKey1->version > pKey2->version) { + return 1; + } + + return 0; +} + #ifdef __cplusplus } #endif diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index 9ed2b25fdf203507e46cb8a79f8c3340bcba5c4f..b90254e543a359290e7e913197ac836592ae18c7 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -49,20 +49,30 @@ extern "C" { #endif -typedef struct SVnodeInfo SVnodeInfo; -typedef struct SMeta SMeta; -typedef struct SSma SSma; -typedef struct STsdb STsdb; -typedef struct STQ STQ; -typedef struct SVState SVState; -typedef struct SVBufPool SVBufPool; -typedef struct SQWorker SQHandle; -typedef struct STsdbKeepCfg STsdbKeepCfg; -typedef struct SMetaSnapReader SMetaSnapReader; -typedef struct SMetaSnapWriter SMetaSnapWriter; -typedef struct STsdbSnapReader STsdbSnapReader; -typedef struct STsdbSnapWriter STsdbSnapWriter; -typedef struct SSnapDataHdr SSnapDataHdr; +typedef struct SVnodeInfo SVnodeInfo; +typedef struct SMeta SMeta; +typedef struct SSma SSma; +typedef struct STsdb STsdb; +typedef struct STQ STQ; +typedef struct SVState SVState; +typedef struct SVBufPool SVBufPool; +typedef struct SQWorker SQHandle; +typedef struct STsdbKeepCfg STsdbKeepCfg; +typedef struct SMetaSnapReader SMetaSnapReader; +typedef struct SMetaSnapWriter SMetaSnapWriter; +typedef struct STsdbSnapReader STsdbSnapReader; +typedef struct STsdbSnapWriter STsdbSnapWriter; +typedef struct STqSnapReader STqSnapReader; +typedef struct STqSnapWriter STqSnapWriter; +typedef struct STqOffsetReader STqOffsetReader; +typedef struct STqOffsetWriter STqOffsetWriter; +typedef struct SStreamTaskReader SStreamTaskReader; +typedef struct SStreamTaskWriter SStreamTaskWriter; +typedef struct SStreamStateReader SStreamStateReader; +typedef struct SStreamStateWriter SStreamStateWriter; +typedef struct SRsmaSnapReader SRsmaSnapReader; +typedef struct SRsmaSnapWriter SRsmaSnapWriter; 
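The typedef block above pairs a snapshot reader with a snapshot writer for each vnode data source (meta, tsdb, tq, stream task/state, rsma), and the matching open/read/close declarations follow just below. Here is a sketch of the consumer-side calling convention those declarations imply; the wrapper function, the `sendBlock` callback, and the NULL-block end-of-range convention are assumptions for illustration, not part of the declared API:

```c
/* Sketch only: meant to live inside the vnode source tree, where the
 * vnodeInt.h declarations (tsdbSnapReaderOpen/tsdbSnapRead/
 * tsdbSnapReaderClose and the SNAP_DATA_* ids) are visible. */
#include "vnodeInt.h"

static int32_t streamTsdbSnapshot(STsdb *pTsdb, int64_t sver, int64_t ever,
                                  int32_t (*sendBlock)(uint8_t *pData)) {
  STsdbSnapReader *pReader = NULL;
  /* The new type argument selects which SNAP_DATA_* tag the reader stamps
   * into each SSnapDataHdr-prefixed block it produces. */
  int32_t code = tsdbSnapReaderOpen(pTsdb, sver, ever, SNAP_DATA_TSDB, &pReader);
  if (code) return code;

  uint8_t *pData = NULL;
  for (;;) {
    code = tsdbSnapRead(pReader, &pData);
    if (code) break;           /* read error */
    if (pData == NULL) break;  /* assumed: NULL block marks end of range */
    code = sendBlock(pData);   /* hypothetical transport callback */
    if (code) break;
  }

  tsdbSnapReaderClose(&pReader);
  return code;
}
```

Each produced block carries an `SSnapDataHdr` whose `type` field (see the `SNAP_DATA_*` enum added further down) lets the receiving side route the payload to the matching writer, e.g. `tsdbSnapWrite` for `SNAP_DATA_TSDB`, as `rsmaSnapWrite` does in the smaSnapshot.c hunk later in this diff.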
+typedef struct SSnapDataHdr SSnapDataHdr; #define VNODE_META_DIR "meta" #define VNODE_TSDB_DIR "tsdb" @@ -133,6 +143,7 @@ int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitMsgIter* p int32_t tsdbDeleteTableData(STsdb* pTsdb, int64_t version, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey); STsdbReader tsdbQueryCacheLastT(STsdb* tsdb, SQueryTableDataCond* pCond, STableListInfo* tableList, uint64_t qId, void* pMemRef); +int32_t tsdbSetKeepCfg(STsdb* pTsdb, STsdbCfg* pCfg); // tq int tqInit(); @@ -150,7 +161,7 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, char* msg, int32_t msgLen); int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen); int32_t tqProcessTaskDropReq(STQ* pTq, char* msg, int32_t msgLen); -int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* data); +int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* data, int64_t ver); int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg); int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg); @@ -196,13 +207,41 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData); int32_t metaSnapWriterClose(SMetaSnapWriter** ppWriter, int8_t rollback); // STsdbSnapReader ======================================== -int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapReader** ppReader); +int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader); int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader); int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData); // STsdbSnapWriter ======================================== int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter); int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData); int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback); +// STqSnapshotReader == +int32_t tqSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, STqSnapReader** ppReader); +int32_t tqSnapReaderClose(STqSnapReader** ppReader); +int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData); +// STqSnapshotWriter ====================================== +int32_t tqSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqSnapWriter** ppWriter); +int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback); +int32_t tqSnapWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData); +// STqOffsetReader ======================================== +int32_t tqOffsetReaderOpen(STQ* pTq, int64_t sver, int64_t ever, STqOffsetReader** ppReader); +int32_t tqOffsetReaderClose(STqOffsetReader** ppReader); +int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData); +// STqOffsetWriter ======================================== +int32_t tqOffsetWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqOffsetWriter** ppWriter); +int32_t tqOffsetWriterClose(STqOffsetWriter** ppWriter, int8_t rollback); +int32_t tqOffsetSnapWrite(STqOffsetWriter* pWriter, uint8_t* pData, uint32_t nData); +// SStreamTaskWriter ====================================== +// SStreamTaskReader ====================================== +// SStreamStateWriter ===================================== +// SStreamStateReader ===================================== +// SRsmaSnapReader ======================================== +int32_t rsmaSnapReaderOpen(SSma* pSma, 
int64_t sver, int64_t ever, SRsmaSnapReader** ppReader); +int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader); +int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData); +// SRsmaSnapWriter ======================================== +int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter); +int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData); +int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback); typedef struct { int8_t streamType; // sma or other @@ -314,6 +353,19 @@ struct SSma { // sma void smaHandleRes(void* pVnode, int64_t smaId, const SArray* data); +enum { + SNAP_DATA_META = 0, + SNAP_DATA_TSDB = 1, + SNAP_DATA_DEL = 2, + SNAP_DATA_RSMA1 = 3, + SNAP_DATA_RSMA2 = 4, + SNAP_DATA_QTASK = 5, + SNAP_DATA_TQ_HANDLE = 6, + SNAP_DATA_TQ_OFFSET = 7, + SNAP_DATA_STREAM_TASK = 8, + SNAP_DATA_STREAM_STATE = 9, +}; + struct SSnapDataHdr { int8_t type; int64_t index; diff --git a/source/dnode/vnode/src/meta/metaOpen.c b/source/dnode/vnode/src/meta/metaOpen.c index 7c7d14e33777a3e2b9905e6309c1ba12bbb6f40e..85293eff30a1f79989b105c1b2939923664444ce 100644 --- a/source/dnode/vnode/src/meta/metaOpen.c +++ b/source/dnode/vnode/src/meta/metaOpen.c @@ -22,6 +22,7 @@ static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL static int ttlIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int uidIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int smaIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); +static int taskIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2); static int32_t metaInitLock(SMeta *pMeta) { return taosThreadRwlockInit(&pMeta->lock, NULL); } static int32_t metaDestroyLock(SMeta *pMeta) { return taosThreadRwlockDestroy(&pMeta->lock); } @@ -130,6 +131,12 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { goto _err; } + ret = tdbTbOpen("stream.task.db", sizeof(int64_t), -1, taskIdxKeyCmpr, pMeta->pEnv, &pMeta->pStreamDb); + if (ret < 0) { + metaError("vgId: %d, failed to open meta stream task index since %s", TD_VID(pVnode), tstrerror(terrno)); + goto _err; + } + // open index if (metaOpenIdx(pMeta) < 0) { metaError("vgId:%d, failed to open meta index since %s", TD_VID(pVnode), tstrerror(terrno)); @@ -143,6 +150,7 @@ int metaOpen(SVnode *pVnode, SMeta **ppMeta) { _err: if (pMeta->pIdx) metaCloseIdx(pMeta); + if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb); if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx); if (pMeta->pTtlIdx) tdbTbClose(pMeta->pTtlIdx); if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx); @@ -162,6 +170,7 @@ _err: int metaClose(SMeta *pMeta) { if (pMeta) { if (pMeta->pIdx) metaCloseIdx(pMeta); + if (pMeta->pStreamDb) tdbTbClose(pMeta->pStreamDb); if (pMeta->pSmaIdx) tdbTbClose(pMeta->pSmaIdx); if (pMeta->pTtlIdx) tdbTbClose(pMeta->pTtlIdx); if (pMeta->pTagIvtIdx) indexClose(pMeta->pTagIvtIdx); @@ -183,11 +192,11 @@ int metaClose(SMeta *pMeta) { int32_t metaRLock(SMeta *pMeta) { int32_t ret = 0; - metaDebug("meta rlock %p B", &pMeta->lock); + metaTrace("meta rlock %p B", &pMeta->lock); ret = taosThreadRwlockRdlock(&pMeta->lock); - metaDebug("meta rlock %p E", &pMeta->lock); + metaTrace("meta rlock %p E", &pMeta->lock); return ret; } @@ -195,11 +204,11 @@ int32_t metaRLock(SMeta *pMeta) { int32_t metaWLock(SMeta *pMeta) { int32_t ret = 0; - metaDebug("meta wlock %p B", &pMeta->lock); + metaTrace("meta wlock %p B", &pMeta->lock); ret = 
taosThreadRwlockWrlock(&pMeta->lock); - metaDebug("meta wlock %p E", &pMeta->lock); + metaTrace("meta wlock %p E", &pMeta->lock); return ret; } @@ -207,11 +216,11 @@ int32_t metaWLock(SMeta *pMeta) { int32_t metaULock(SMeta *pMeta) { int32_t ret = 0; - metaDebug("meta ulock %p B", &pMeta->lock); + metaTrace("meta ulock %p B", &pMeta->lock); ret = taosThreadRwlockUnlock(&pMeta->lock); - metaDebug("meta ulock %p E", &pMeta->lock); + metaTrace("meta ulock %p E", &pMeta->lock); return ret; } @@ -315,7 +324,8 @@ static int tagIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL return 1; } else if (!pTagIdxKey1->isNull && !pTagIdxKey2->isNull) { // all not NULL, compr tag vals - c = doCompare(pTagIdxKey1->data, pTagIdxKey2->data, pTagIdxKey1->type, 0); + __compar_fn_t func = getComparFunc(pTagIdxKey1->type, 0); + c = func(pTagIdxKey1->data, pTagIdxKey2->data); if (c) return c; } @@ -377,3 +387,16 @@ static int smaIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kL return 0; } + +static int taskIdxKeyCmpr(const void *pKey1, int kLen1, const void *pKey2, int kLen2) { + int32_t uid1 = *(int32_t *)pKey1; + int32_t uid2 = *(int32_t *)pKey2; + + if (uid1 > uid2) { + return 1; + } else if (uid1 < uid2) { + return -1; + } + + return 0; +} diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 142ad181d564a55508bc8c5ffa2505442e618c0d..6a961b7593006c37664e2afe8a7db8744dc86f2b 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -98,9 +98,9 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) { return uid; } -int metaGetTableNameByUid(void* meta, uint64_t uid, char* tbName) { +int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName) { SMetaReader mr = {0}; - metaReaderInit(&mr, (SMeta*)meta, 0); + metaReaderInit(&mr, (SMeta *)meta, 0); metaGetTableEntryByUid(&mr, uid); STR_TO_VARSTR(tbName, mr.me.name); diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index 46609cf5613c91e1d65eecc257c43f33a733aa76..7f69c7a638cf65dd24f4300cc8cf0babcb8caf50 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -109,7 +109,7 @@ int32_t metaSnapRead(SMetaSnapReader* pReader, uint8_t** ppData) { } SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData); - pHdr->type = 0; // TODO: use macro + pHdr->type = SNAP_DATA_META; pHdr->size = nData; memcpy(pHdr->data, pData, nData); @@ -194,4 +194,4 @@ int32_t metaSnapWrite(SMetaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) _err: metaError("vgId:%d vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code)); return code; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/meta/metaStream.c b/source/dnode/vnode/src/meta/metaStream.c new file mode 100644 index 0000000000000000000000000000000000000000..b7b84da2310825bd7cc4550c279ecea702ff3e3d --- /dev/null +++ b/source/dnode/vnode/src/meta/metaStream.c @@ -0,0 +1,16 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "meta.h" diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index 26c81976dcfe58fe574789eb58dfd9c9e6c81316..702c7fb50518b38004f519b4eaa4f7f9585ecf1a 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -793,9 +793,6 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA goto _err; } - if (iCol == 0) { - // TODO : need to update tag index - } ctbEntry.version = version; if (pTagSchema->nCols == 1 && pTagSchema->pSchema[0].type == TSDB_DATA_TYPE_JSON) { ctbEntry.ctbEntry.pTags = taosMemoryMalloc(pAlterTbReq->nTagVal); @@ -849,6 +846,10 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA // save to uid.idx tdbTbUpsert(pMeta->pUidIdx, &ctbEntry.uid, sizeof(tb_uid_t), &version, sizeof(version), &pMeta->txn); + if (iCol == 0) { + metaUpdateTagIdx(pMeta, &ctbEntry); + } + tDecoderClear(&dc1); tDecoderClear(&dc2); if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags); @@ -1131,7 +1132,7 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) { pCtbEntry->uid, &pTagIdxKey, &nTagIdxKey) < 0) { return -1; } - tdbTbInsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, &pMeta->txn); + tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, &pMeta->txn); metaDestroyTagIdxKey(pTagIdxKey); tDecoderClear(&dc); tdbFree(pData); diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c index 21dfd8a32d617a2674940c483287506bdf7a8852..c5cb816887180a44050770adfafd84fe5cf43c65 100644 --- a/source/dnode/vnode/src/sma/smaSnapshot.c +++ b/source/dnode/vnode/src/sma/smaSnapshot.c @@ -49,7 +49,8 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapRead for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { if (pSma->pRSmaTsdb[i]) { - code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, &pReader->pDataReader[i]); + code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, i == 0 ? 
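/* Each rollup level is tagged with its own SSnapDataHdr type so the receiving vnode can route blocks to the matching rsma tsdb writer; rsmaSnapWrite below dispatches on pHdr->type (SNAP_DATA_RSMA1 to the level-1 writer, SNAP_DATA_RSMA2 to the level-2 writer, SNAP_DATA_QTASK to the qtask-info file). */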
SNAP_DATA_RSMA1 : SNAP_DATA_RSMA2, + &pReader->pDataReader[i]); if (code < 0) { goto _err; } @@ -221,10 +222,9 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) { } } + smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma)); taosMemoryFree(pWriter); *ppWriter = NULL; - - smaInfo("vgId:%d vnode snapshot rsma writer close succeed", SMA_VID(pWriter->pSma)); return code; _err: @@ -245,15 +245,17 @@ int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) code = tsdbSnapWrite(pWriter->pDataWriter[1], pData, nData); } else if (pHdr->type == SNAP_DATA_QTASK) { code = rsmaSnapWriteQTaskInfo(pWriter, pData, nData); + } else { + ASSERT(0); + } if (code < 0) goto _err; _exit: - smaInfo("vgId:%d rsma snapshot write for data %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type); + smaInfo("vgId:%d rsma snapshot write for data type %" PRIi8 " succeed", SMA_VID(pWriter->pSma), pHdr->type); return code; _err: - smaError("vgId:%d rsma snapshot write for data %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type, + smaError("vgId:%d rsma snapshot write for data type %" PRIi8 " failed since %s", SMA_VID(pWriter->pSma), pHdr->type, tstrerror(code)); return code; } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 364ecbab616e64cbac98aaa8062707017538ef69..3e321fa2a103fcf6de48fefc75c1cc2f6c9158e5 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -548,6 +548,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) { SWalRef* pRef = walRefCommittedVer(pTq->pVnode->pWal); if (pRef == NULL) { ASSERT(0); + return -1; } int64_t ver = pRef->refVer; pHandle->pRef = pRef; @@ -684,6 +685,9 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) { taosHashPut(pTq->pStreamTasks, &pTask->taskId, sizeof(int32_t), &pTask, sizeof(void*)); + /*SMeta* pMeta = pTq->pVnode->pMeta;*/ + /*tdbTbUpsert(pMeta->pTaskIdx, &pTask->taskId, sizeof(int32_t), msg, msgLen, &pMeta->txn);*/ + return 0; FAIL: if (pTask->inputQueue) streamQueueClose(pTask->inputQueue); @@ -692,7 +696,7 @@ FAIL: return -1; } -int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq) { +int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) { void* pIter = NULL; bool failed = false; SStreamDataSubmit* pSubmit = NULL; @@ -710,7 +714,7 @@ int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq) { SStreamTask* pTask = *(SStreamTask**)pIter; if (!pTask->isDataScan) continue; - qDebug("data submit enqueue stream task: %d", pTask->taskId); + qDebug("data submit enqueue stream task: %d, ver:%" PRId64, pTask->taskId, ver); if (!failed) { if (streamTaskInput(pTask, (SStreamQueueItem*)pSubmit) < 0) { diff --git a/source/dnode/vnode/src/tq/tqCommit.c b/source/dnode/vnode/src/tq/tqCommit.c index 639da22b1c16f39a953ae0b7885344a7ca95768e..dabd97a345f375c6774d37e4f4a408bd0bd44940 100644 --- a/source/dnode/vnode/src/tq/tqCommit.c +++ b/source/dnode/vnode/src/tq/tqCommit.c @@ -15,4 +15,4 @@ #include "tq.h" -int tqCommit(STQ* pTq) { return tqOffsetSnapshot(pTq->pOffsetStore); } +int tqCommit(STQ* pTq) { return tqOffsetCommitFile(pTq->pOffsetStore); } diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c index 290ffe5c8d6a16865df7ee1296c871ccebf0a847..b8e021f795408f10b4363bf1199db82b90ebd2a6 100644 --- a/source/dnode/vnode/src/tq/tqMeta.c +++ b/source/dnode/vnode/src/tq/tqMeta.c @@ -15,7 +15,7 @@ #include "tdbInt.h" #include "tq.h" -static int32_t 
tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { +int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1; if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1; @@ -29,7 +29,7 @@ static int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) { return pEncoder->pos; } -static int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { +int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { if (tStartDecode(pDecoder) < 0) return -1; if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1; if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1; @@ -43,33 +43,20 @@ static int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) { return 0; } -int32_t tqMetaOpen(STQ* pTq) { - if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaStore) < 0) { +int32_t tqMetaRestoreHandle(STQ* pTq) { + TBC* pCur = NULL; + if (tdbTbcOpen(pTq->pExecStore, &pCur, NULL) < 0) { ASSERT(0); + return -1; } - if (tdbTbOpen("handles", -1, -1, 0, pTq->pMetaStore, &pTq->pExecStore) < 0) { - ASSERT(0); - } - - TXN txn = {0}; - - if (tdbTxnOpen(&txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) { - ASSERT(0); - } - - TBC* pCur; - if (tdbTbcOpen(pTq->pExecStore, &pCur, &txn) < 0) { - ASSERT(0); - } - - void* pKey = NULL; - int kLen = 0; - void* pVal = NULL; - int vLen = 0; + void* pKey = NULL; + int kLen = 0; + void* pVal = NULL; + int vLen = 0; + SDecoder decoder; tdbTbcMoveToFirst(pCur); - SDecoder decoder; while (tdbTbcNext(pCur, &pKey, &kLen, &pVal, &vLen) == 0) { STqHandle handle; @@ -79,6 +66,7 @@ int32_t tqMetaOpen(STQ* pTq) { handle.pRef = walOpenRef(pTq->pVnode->pWal); if (handle.pRef == NULL) { ASSERT(0); + tdbTbcClose(pCur); /* release the cursor before bailing out */ + return -1; } walRefVer(handle.pRef, handle.snapshotVer); @@ -109,9 +97,24 @@ int32_t tqMetaOpen(STQ* pTq) { } tdbTbcClose(pCur); - if (tdbTxnClose(&txn) < 0) { + return 0; +} + +int32_t tqMetaOpen(STQ* pTq) { + if (tdbOpen(pTq->path, 16 * 1024, 1, &pTq->pMetaStore) < 0) { ASSERT(0); + return -1; } + + if (tdbTbOpen("tq.db", -1, -1, NULL, pTq->pMetaStore, &pTq->pExecStore) < 0) { + ASSERT(0); + return -1; + } + + if (tqMetaRestoreHandle(pTq) < 0) { + return -1; + } + return 0; } diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c index ec9674d637af8c1456f20769f36e1ec57acc2193..5c1d5d65b4f74297fcc0db81d16788f15ee58ab7 100644 --- a/source/dnode/vnode/src/tq/tqOffset.c +++ b/source/dnode/vnode/src/tq/tqOffset.c @@ -22,29 +22,15 @@ struct STqOffsetStore { SHashObj* pHash; // SHashObj }; -static char* buildFileName(const char* path) { +char* tqOffsetBuildFName(const char* path, int32_t ver) { int32_t len = strlen(path); - char* fname = taosMemoryCalloc(1, len + 20); - snprintf(fname, len + 20, "%s/offset", path); + char* fname = taosMemoryCalloc(1, len + 40); + snprintf(fname, len + 40, "%s/offset-ver%d", path, ver); return fname; } -STqOffsetStore* tqOffsetOpen(STQ* pTq) { - STqOffsetStore* pStore = taosMemoryCalloc(1, sizeof(STqOffsetStore)); - if (pStore == NULL) { - return NULL; - } - pStore->pTq = pTq; - pTq->pOffsetStore = pStore; - - pStore->pHash = taosHashInit(64, MurmurHash3_32, true, HASH_NO_LOCK); - if (pStore->pHash == NULL) { - if (pStore->pHash) taosHashCleanup(pStore->pHash); - return NULL; - } - char* fname = buildFileName(pStore->pTq->path); +int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname) { TdFilePtr pFile = taosOpenFile(fname, TD_FILE_READ); - 
taosMemoryFree(fname); if (pFile != NULL) { STqOffsetHead head = {0}; int64_t code; @@ -79,11 +65,32 @@ STqOffsetStore* tqOffsetOpen(STQ* pTq) { taosCloseFile(&pFile); } + return 0; +} + +STqOffsetStore* tqOffsetOpen(STQ* pTq) { + STqOffsetStore* pStore = taosMemoryCalloc(1, sizeof(STqOffsetStore)); + if (pStore == NULL) { + return NULL; + } + pStore->pTq = pTq; + pTq->pOffsetStore = pStore; + + pStore->pHash = taosHashInit(64, MurmurHash3_32, true, HASH_NO_LOCK); + if (pStore->pHash == NULL) { + taosMemoryFree(pStore); + return NULL; + } + char* fname = tqOffsetBuildFName(pStore->pTq->path, 0); + if (tqOffsetRestoreFromFile(pStore, fname) < 0) { + ASSERT(0); + } + taosMemoryFree(fname); return pStore; } void tqOffsetClose(STqOffsetStore* pStore) { - tqOffsetSnapshot(pStore); + tqOffsetCommitFile(pStore); taosHashCleanup(pStore->pHash); taosMemoryFree(pStore); } @@ -93,8 +100,6 @@ STqOffset* tqOffsetRead(STqOffsetStore* pStore, const char* subscribeKey) { } int32_t tqOffsetWrite(STqOffsetStore* pStore, const STqOffset* pOffset) { - /*ASSERT(pOffset->val.type == TMQ_OFFSET__LOG);*/ - /*ASSERT(pOffset->val.version >= 0);*/ return taosHashPut(pStore->pHash, pOffset->subKey, strlen(pOffset->subKey), pOffset, sizeof(STqOffset)); } @@ -102,10 +107,9 @@ int32_t tqOffsetDelete(STqOffsetStore* pStore, const char* subscribeKey) { return taosHashRemove(pStore->pHash, subscribeKey, strlen(subscribeKey)); } -int32_t tqOffsetSnapshot(STqOffsetStore* pStore) { - // open file - // TODO file name should be with a version - char* fname = buildFileName(pStore->pTq->path); +int32_t tqOffsetCommitFile(STqOffsetStore* pStore) { + // TODO file name should be with a newer version + char* fname = tqOffsetBuildFName(pStore->pTq->path, 0); TdFilePtr pFile = taosOpenFile(fname, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_APPEND); taosMemoryFree(fname); if (pFile == NULL) { diff --git a/source/dnode/vnode/src/tq/tqOffsetSnapshot.c b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c new file mode 100644 index 0000000000000000000000000000000000000000..cacb82b702a3e7e3a1f54fe6515e75d873be079a --- /dev/null +++ b/source/dnode/vnode/src/tq/tqOffsetSnapshot.c @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#include "meta.h" +#include "tdbInt.h" +#include "tq.h" + +// STqOffsetReader ======================================== +struct STqOffsetReader { + STQ* pTq; + int64_t sver; + int64_t ever; + int8_t readEnd; +}; + +int32_t tqOffsetReaderOpen(STQ* pTq, int64_t sver, int64_t ever, STqOffsetReader** ppReader) { + STqOffsetReader* pReader = NULL; + + pReader = taosMemoryCalloc(1, sizeof(STqOffsetReader)); + if (pReader == NULL) { + *ppReader = NULL; + return -1; + } + pReader->pTq = pTq; + pReader->sver = sver; + pReader->ever = ever; + + tqInfo("vgId:%d vnode snapshot tq offset reader opened", TD_VID(pTq->pVnode)); + + *ppReader = pReader; + return 0; +} + +int32_t tqOffsetReaderClose(STqOffsetReader** ppReader) { + taosMemoryFree(*ppReader); + *ppReader = NULL; + return 0; +} + +int32_t tqOffsetSnapRead(STqOffsetReader* pReader, uint8_t** ppData) { + if (pReader->readEnd != 0) return 0; + + char* fname = tqOffsetBuildFName(pReader->pTq->path, 0); + TdFilePtr pFile = taosOpenFile(fname, TD_FILE_READ); + if (pFile == NULL) { + /* no offset file yet, nothing to ship for this snapshot */ + taosMemoryFree(fname); + return 0; + } + + int64_t sz = 0; + if (taosStatFile(fname, &sz, NULL) < 0) { + ASSERT(0); + } + taosMemoryFree(fname); /* free only after taosStatFile is done with it */ + + SSnapDataHdr* buf = taosMemoryCalloc(1, sz + sizeof(SSnapDataHdr)); + if (buf == NULL) { + taosCloseFile(&pFile); + terrno = TSDB_CODE_OUT_OF_MEMORY; + return terrno; + } + void* abuf = POINTER_SHIFT(buf, sizeof(SSnapDataHdr)); + int64_t contLen = taosReadFile(pFile, abuf, sz); + taosCloseFile(&pFile); + if (contLen != sz) { + ASSERT(0); + taosMemoryFree(buf); + return -1; + } + buf->size = sz; + buf->type = SNAP_DATA_TQ_OFFSET; + *ppData = (uint8_t*)buf; + + pReader->readEnd = 1; + return 0; +} + +// STqOffsetWriter ======================================== +struct STqOffsetWriter { + STQ* pTq; + int64_t sver; + int64_t ever; + int32_t tmpFileVer; + char* fname; +}; + +int32_t tqOffsetWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqOffsetWriter** ppWriter) { + int32_t code = 0; + STqOffsetWriter* pWriter; + + pWriter = (STqOffsetWriter*)taosMemoryCalloc(1, sizeof(STqOffsetWriter)); + if (pWriter == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + pWriter->pTq = pTq; + pWriter->sver = sver; + pWriter->ever = ever; + + *ppWriter = pWriter; + return code; + +_err: + tqError("vgId:%d tq snapshot writer open failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); + *ppWriter = NULL; + return code; +} + +int32_t tqOffsetWriterClose(STqOffsetWriter** ppWriter, int8_t rollback) { + STqOffsetWriter* pWriter = *ppWriter; + STQ* pTq = pWriter->pTq; + char* fname = tqOffsetBuildFName(pTq->path, 0); + + if (rollback) { + taosRemoveFile(pWriter->fname); + } else { + taosRenameFile(pWriter->fname, fname); + if (tqOffsetRestoreFromFile(pTq->pOffsetStore, fname) < 0) { + ASSERT(0); + } + } + taosMemoryFree(fname); + taosMemoryFree(pWriter->fname); + taosMemoryFree(pWriter); + *ppWriter = NULL; + return 0; +} + +int32_t tqOffsetSnapWrite(STqOffsetWriter* pWriter, uint8_t* pData, uint32_t nData) { + STQ* pTq = pWriter->pTq; + pWriter->tmpFileVer = 1; + pWriter->fname = tqOffsetBuildFName(pTq->path, pWriter->tmpFileVer); + TdFilePtr pFile = taosOpenFile(pWriter->fname, TD_FILE_CREATE | TD_FILE_WRITE); + SSnapDataHdr* pHdr = (SSnapDataHdr*)pData; + int64_t size = pHdr->size; + ASSERT(size == nData - sizeof(SSnapDataHdr)); + if (pFile) { + int64_t contLen = taosWriteFile(pFile, pHdr->data, size); + taosCloseFile(&pFile); + if (contLen != size) { + ASSERT(0); + return -1; + } + } else { + ASSERT(0); + return -1; + } + return 0; +} diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c index 
6097ddd49eb504a2d5e8f6b41a632952c389992d..0720cf015bc30aa36f1f5f8de432b79d7586e28d 100644 --- a/source/dnode/vnode/src/tq/tqPush.c +++ b/source/dnode/vnode/src/tq/tqPush.c @@ -252,7 +252,7 @@ int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) SSubmitReq* pReq = (SSubmitReq*)data; pReq->version = ver; - tqProcessStreamTrigger(pTq, data); + tqProcessStreamTrigger(pTq, data, ver); } return 0; diff --git a/source/dnode/vnode/src/tq/tqSnapshot.c b/source/dnode/vnode/src/tq/tqSnapshot.c new file mode 100644 index 0000000000000000000000000000000000000000..21172134baecaee806489ddb6d20e07293f172c2 --- /dev/null +++ b/source/dnode/vnode/src/tq/tqSnapshot.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#include "meta.h" +#include "tdbInt.h" +#include "tq.h" + +// STqSnapReader ======================================== +struct STqSnapReader { + STQ* pTq; + int64_t sver; + int64_t ever; + TBC* pCur; +}; + +int32_t tqSnapReaderOpen(STQ* pTq, int64_t sver, int64_t ever, STqSnapReader** ppReader) { + int32_t code = 0; + STqSnapReader* pReader = NULL; + + // alloc + pReader = (STqSnapReader*)taosMemoryCalloc(1, sizeof(STqSnapReader)); + if (pReader == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + pReader->pTq = pTq; + pReader->sver = sver; + pReader->ever = ever; + + // impl + code = tdbTbcOpen(pTq->pExecStore, &pReader->pCur, NULL); + if (code) { + taosMemoryFree(pReader); + goto _err; + } + + code = tdbTbcMoveToFirst(pReader->pCur); + if (code) { + taosMemoryFree(pReader); + goto _err; + } + + tqInfo("vgId:%d vnode snapshot tq reader opened", TD_VID(pTq->pVnode)); + + *ppReader = pReader; + return code; + +_err: + tqError("vgId:%d vnode snapshot tq reader open failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); + *ppReader = NULL; + return code; +} + +int32_t tqSnapReaderClose(STqSnapReader** ppReader) { + int32_t code = 0; + + tdbTbcClose((*ppReader)->pCur); + taosMemoryFree(*ppReader); + *ppReader = NULL; + + return code; +} + +int32_t tqSnapRead(STqSnapReader* pReader, uint8_t** ppData) { + int32_t code = 0; + const void* pKey = NULL; + const void* pVal = NULL; + int32_t kLen = 0; + int32_t vLen = 0; + SDecoder decoder; + STqHandle handle; + + *ppData = NULL; + for (;;) { + if (tdbTbcGet(pReader->pCur, &pKey, &kLen, &pVal, &vLen)) { + goto _exit; + } + + tDecoderInit(&decoder, (uint8_t*)pVal, vLen); + tDecodeSTqHandle(&decoder, &handle); + tDecoderClear(&decoder); + + if (handle.snapshotVer <= pReader->sver && handle.snapshotVer >= pReader->ever) { + tdbTbcMoveToNext(pReader->pCur); + break; + } else { + tdbTbcMoveToNext(pReader->pCur); + } + } + + ASSERT(pVal && vLen); + + *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + vLen); + if (*ppData == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + + SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData); + pHdr->type = SNAP_DATA_TQ_HANDLE; + pHdr->size = vLen; + memcpy(pHdr->data, pVal, vLen); + + tqInfo("vgId:%d vnode snapshot tq read data, 
version:%" PRId64 " subKey: %s vLen:%d", TD_VID(pReader->pTq->pVnode), + handle.snapshotVer, handle.subKey, vLen); + +_exit: + return code; + +_err: + tqError("vgId:%d vnode snapshot tq read data failed since %s", TD_VID(pReader->pTq->pVnode), tstrerror(code)); + return code; +} + +// STqSnapWriter ======================================== +struct STqSnapWriter { + STQ* pTq; + int64_t sver; + int64_t ever; + TXN txn; +}; + +int32_t tqSnapWriterOpen(STQ* pTq, int64_t sver, int64_t ever, STqSnapWriter** ppWriter) { + int32_t code = 0; + STqSnapWriter* pWriter; + + // alloc + pWriter = (STqSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter)); + if (pWriter == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _err; + } + pWriter->pTq = pTq; + pWriter->sver = sver; + pWriter->ever = ever; + + if (tdbTxnOpen(&pWriter->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, 0) < 0) { + ASSERT(0); + } + + *ppWriter = pWriter; + return code; + +_err: + tqError("vgId:%d tq snapshot writer open failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); + *ppWriter = NULL; + return code; +} + +int32_t tqSnapWriterClose(STqSnapWriter** ppWriter, int8_t rollback) { + int32_t code = 0; + STqSnapWriter* pWriter = *ppWriter; + STQ* pTq = pWriter->pTq; + + if (rollback) { + ASSERT(0); + } else { + code = tdbCommit(pWriter->pTq->pMetaStore, &pWriter->txn); + if (code) goto _err; + } + + taosMemoryFree(pWriter); + *ppWriter = NULL; + + // restore from metastore + if (tqMetaRestoreHandle(pTq) < 0) { + goto _err; + } + + return code; + +_err: + tqError("vgId:%d tq snapshot writer close failed since %s", TD_VID(pWriter->pTq->pVnode), tstrerror(code)); + return code; +} + +int32_t tqSnapWrite(STqSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { + int32_t code = 0; + STQ* pTq = pWriter->pTq; + SDecoder decoder = {0}; + SDecoder* pDecoder = &decoder; + STqHandle handle; + + tDecoderInit(pDecoder, pData + sizeof(SSnapDataHdr), nData - sizeof(SSnapDataHdr)); + code = tDecodeSTqHandle(pDecoder, &handle); + if (code) goto _err; + code = tqMetaSaveHandle(pTq, handle.subKey, &handle); + if (code < 0) goto _err; + tDecoderClear(pDecoder); + + return code; + +_err: + tDecoderClear(pDecoder); + tqError("vgId:%d vnode snapshot tq write failed since %s", TD_VID(pTq->pVnode), tstrerror(code)); + return code; +} diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c index 194bd2e924351e26c6ed526286f01f6b67619087..e6db8128655717c260745d5a957dc6dfddb2b204 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c @@ -36,16 +36,20 @@ typedef struct { TSKEY minKey; TSKEY maxKey; // commit file data - SDataFReader *pReader; - SArray *aBlockIdx; // SArray - SMapData oBlockMap; // SMapData, read from reader - SBlockData oBlockData; - SDataFWriter *pWriter; - SArray *aBlockIdxN; // SArray - SMapData nBlockMap; // SMapData - SBlockData nBlockData; - SSkmInfo skmTable; - SSkmInfo skmRow; + struct { + SDataFReader *pReader; + SArray *aBlockIdx; // SArray + SMapData mBlock; // SMapData, read from reader + SBlockData bData; + } dReader; + struct { + SDataFWriter *pWriter; + SArray *aBlockIdx; // SArray + SMapData mBlock; // SMapData + SBlockData bData; + } dWriter; + SSkmInfo skmTable; + SSkmInfo skmRow; /* commit del */ SDelFReader *pDelFReader; SDelFWriter *pDelFWriter; @@ -276,16 +280,16 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) { pCommitter->nextKey = TSKEY_MAX; // old - taosArrayClear(pCommitter->aBlockIdx); - 
tMapDataReset(&pCommitter->oBlockMap); - tBlockDataReset(&pCommitter->oBlockData); + taosArrayClear(pCommitter->dReader.aBlockIdx); + tMapDataReset(&pCommitter->dReader.mBlock); + tBlockDataReset(&pCommitter->dReader.bData); pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &(SDFileSet){.fid = pCommitter->commitFid}, tDFileSetCmprFn, TD_EQ); if (pRSet) { - code = tsdbDataFReaderOpen(&pCommitter->pReader, pTsdb, pRSet); + code = tsdbDataFReaderOpen(&pCommitter->dReader.pReader, pTsdb, pRSet); if (code) goto _err; - code = tsdbReadBlockIdx(pCommitter->pReader, pCommitter->aBlockIdx, NULL); + code = tsdbReadBlockIdx(pCommitter->dReader.pReader, pCommitter->dReader.aBlockIdx, NULL); if (code) goto _err; } @@ -296,9 +300,9 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) { SSmaFile fSma; SDFileSet wSet = {.pHeadF = &fHead, .pDataF = &fData, .pLastF = &fLast, .pSmaF = &fSma}; - taosArrayClear(pCommitter->aBlockIdxN); - tMapDataReset(&pCommitter->nBlockMap); - tBlockDataReset(&pCommitter->nBlockData); + taosArrayClear(pCommitter->dWriter.aBlockIdx); + tMapDataReset(&pCommitter->dWriter.mBlock); + tBlockDataReset(&pCommitter->dWriter.bData); if (pRSet) { wSet.diskId = pRSet->diskId; wSet.fid = pCommitter->commitFid; @@ -307,14 +311,20 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) { fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0}; fSma = *pRSet->pSmaF; } else { - wSet.diskId = (SDiskID){.level = 0, .id = 0}; + SDiskID did = {0}; + + tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did); + + tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did); + + wSet.diskId = did; wSet.fid = pCommitter->commitFid; fHead = (SHeadFile){.commitID = pCommitter->commitID, .offset = 0, .size = 0}; fData = (SDataFile){.commitID = pCommitter->commitID, .size = 0}; fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0}; fSma = (SSmaFile){.commitID = pCommitter->commitID, .size = 0}; } - code = tsdbDataFWriterOpen(&pCommitter->pWriter, pTsdb, &wSet); + code = tsdbDataFWriterOpen(&pCommitter->dWriter.pWriter, pTsdb, &wSet); if (code) goto _err; _exit: @@ -385,10 +395,11 @@ static int32_t tsdbCommitBlockData(SCommitter *pCommitter, SBlockData *pBlockDat } } - code = tsdbWriteBlockData(pCommitter->pWriter, pBlockData, NULL, NULL, pBlockIdx, pBlock, pCommitter->cmprAlg); + code = + tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, NULL, NULL, pBlockIdx, pBlock, pCommitter->cmprAlg); if (code) goto _err; - code = tMapDataPutItem(&pCommitter->nBlockMap, pBlock, tPutBlock); + code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock); if (code) goto _err; return code; @@ -401,8 +412,8 @@ static int32_t tsdbMergeTableData(SCommitter *pCommitter, STbDataIter *pIter, SB int8_t toDataOnly) { int32_t code = 0; SBlockIdx *pBlockIdx = &(SBlockIdx){.suid = pIter->pTbData->suid, .uid = pIter->pTbData->uid}; - SBlockData *pBlockDataMerge = &pCommitter->oBlockData; - SBlockData *pBlockData = &pCommitter->nBlockData; + SBlockData *pBlockDataMerge = &pCommitter->dReader.bData; + SBlockData *pBlockData = &pCommitter->dWriter.bData; SBlock block; SBlock *pBlock = █ TSDBROW *pRow1; @@ -410,7 +421,7 @@ static int32_t tsdbMergeTableData(SCommitter *pCommitter, STbDataIter *pIter, SB TSDBROW *pRow2 = &row2; // read SBlockData - code = tsdbReadBlockData(pCommitter->pReader, pBlockIdx, pBlockMerge, pBlockDataMerge, NULL, NULL); + code = tsdbReadBlockData(pCommitter->dReader.pReader, pBlockIdx, pBlockMerge, pBlockDataMerge, NULL, NULL); if (code) goto _err; code 
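/* The SCommitter refactor above groups reader-side state under dReader (the on-disk fileset being merged from) and writer-side state under dWriter (the fileset being produced), replacing the old oBlockMap/nBlockMap style naming. */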
= tBlockDataSetSchema(pBlockData, pCommitter->skmTable.pTSchema); @@ -507,7 +518,7 @@ static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter TSDBROW *pRow; SBlock block; SBlock *pBlock = █ - SBlockData *pBlockData = &pCommitter->nBlockData; + SBlockData *pBlockData = &pCommitter->dWriter.bData; int64_t suid = pIter->pTbData->suid; int64_t uid = pIter->pTbData->uid; @@ -569,14 +580,14 @@ static int32_t tsdbCommitTableDiskData(SCommitter *pCommitter, SBlock *pBlock, S SBlock block; if (pBlock->last) { - code = tsdbReadBlockData(pCommitter->pReader, pBlockIdx, pBlock, &pCommitter->oBlockData, NULL, NULL); + code = tsdbReadBlockData(pCommitter->dReader.pReader, pBlockIdx, pBlock, &pCommitter->dReader.bData, NULL, NULL); if (code) goto _err; tBlockReset(&block); - code = tsdbCommitBlockData(pCommitter, &pCommitter->oBlockData, &block, pBlockIdx, 0); + code = tsdbCommitBlockData(pCommitter, &pCommitter->dReader.bData, &block, pBlockIdx, 0); if (code) goto _err; } else { - code = tMapDataPutItem(&pCommitter->nBlockMap, pBlock, tPutBlock); + code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock); if (code) goto _err; } @@ -592,10 +603,10 @@ static int32_t tsdbCommitTableDataEnd(SCommitter *pCommitter, int64_t suid, int6 SBlockIdx blockIdx = {.suid = suid, .uid = uid}; SBlockIdx *pBlockIdx = &blockIdx; - code = tsdbWriteBlock(pCommitter->pWriter, &pCommitter->nBlockMap, NULL, pBlockIdx); + code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, NULL, pBlockIdx); if (code) goto _err; - if (taosArrayPush(pCommitter->aBlockIdxN, pBlockIdx) == NULL) { + if (taosArrayPush(pCommitter->dWriter.aBlockIdx, pBlockIdx) == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _err; } @@ -637,7 +648,7 @@ static int32_t tsdbGetOvlpNRow(STbDataIter *pIter, SBlock *pBlock) { static int32_t tsdbMergeAsSubBlock(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) { int32_t code = 0; - SBlockData *pBlockData = &pCommitter->nBlockData; + SBlockData *pBlockData = &pCommitter->dWriter.bData; SBlockIdx *pBlockIdx = &(SBlockIdx){.suid = pIter->pTbData->suid, .uid = pIter->pTbData->uid}; SBlock block; TSDBROW *pRow; @@ -705,10 +716,10 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBl } if (pBlockIdx) { - code = tsdbReadBlock(pCommitter->pReader, pBlockIdx, &pCommitter->oBlockMap, NULL); + code = tsdbReadBlock(pCommitter->dReader.pReader, pBlockIdx, &pCommitter->dReader.mBlock, NULL); if (code) goto _err; - nBlock = pCommitter->oBlockMap.nItem; + nBlock = pCommitter->dReader.mBlock.nItem; ASSERT(nBlock > 0); suid = pBlockIdx->suid; @@ -720,13 +731,13 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBl if (pRow == NULL && nBlock == 0) goto _exit; // start =========== - tMapDataReset(&pCommitter->nBlockMap); + tMapDataReset(&pCommitter->dWriter.mBlock); SBlock block; SBlock *pBlock = █ iBlock = 0; if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->oBlockMap, iBlock, pBlock, tGetBlock); + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); } else { pBlock = NULL; } @@ -750,7 +761,7 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBl if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL; iBlock++; if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->oBlockMap, iBlock, pBlock, tGetBlock); + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); } else { pBlock = NULL; } @@ -765,7 +776,7 @@ static 
int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBl iBlock++; if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->oBlockMap, iBlock, pBlock, tGetBlock); + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); } else { pBlock = NULL; } @@ -792,7 +803,7 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBl SBlock nextBlock = {0}; tBlockReset(&nextBlock); - tMapDataGetItemByIdx(&pCommitter->oBlockMap, iBlock + 1, &nextBlock, tGetBlock); + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock + 1, &nextBlock, tGetBlock); toKey = nextBlock.minKey; } @@ -804,7 +815,7 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBl if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) pRow = NULL; iBlock++; if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->oBlockMap, iBlock, pBlock, tGetBlock); + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); } else { pBlock = NULL; } @@ -816,7 +827,7 @@ static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData, SBl iBlock++; if (iBlock < nBlock) { - tMapDataGetItemByIdx(&pCommitter->oBlockMap, iBlock, pBlock, tGetBlock); + tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock); } else { pBlock = NULL; } @@ -851,23 +862,23 @@ static int32_t tsdbCommitFileDataEnd(SCommitter *pCommitter) { int32_t code = 0; // write blockIdx - code = tsdbWriteBlockIdx(pCommitter->pWriter, pCommitter->aBlockIdxN, NULL); + code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx, NULL); if (code) goto _err; // update file header - code = tsdbUpdateDFileSetHeader(pCommitter->pWriter); + code = tsdbUpdateDFileSetHeader(pCommitter->dWriter.pWriter); if (code) goto _err; // upsert SDFileSet - code = tsdbFSUpsertFSet(&pCommitter->fs, &pCommitter->pWriter->wSet); + code = tsdbFSUpsertFSet(&pCommitter->fs, &pCommitter->dWriter.pWriter->wSet); if (code) goto _err; // close and sync - code = tsdbDataFWriterClose(&pCommitter->pWriter, 1); + code = tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 1); if (code) goto _err; - if (pCommitter->pReader) { - code = tsdbDataFReaderClose(&pCommitter->pReader); + if (pCommitter->dReader.pReader) { + code = tsdbDataFReaderClose(&pCommitter->dReader.pReader); if (code) goto _err; } @@ -892,14 +903,14 @@ static int32_t tsdbCommitFileData(SCommitter *pCommitter) { int32_t iTbData = 0; int32_t nTbData = taosArrayGetSize(pMemTable->aTbData); int32_t iBlockIdx = 0; - int32_t nBlockIdx = taosArrayGetSize(pCommitter->aBlockIdx); + int32_t nBlockIdx = taosArrayGetSize(pCommitter->dReader.aBlockIdx); STbData *pTbData; SBlockIdx *pBlockIdx; ASSERT(nTbData > 0); pTbData = (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData); - pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->aBlockIdx, iBlockIdx) : NULL; + pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL; while (pTbData || pBlockIdx) { if (pTbData && pBlockIdx) { int32_t c = tTABLEIDCmprFn(pTbData, pBlockIdx); @@ -930,7 +941,7 @@ static int32_t tsdbCommitFileData(SCommitter *pCommitter) { if (code) goto _err; iBlockIdx++; - pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->aBlockIdx, iBlockIdx) : NULL; + pBlockIdx = (iBlockIdx < nBlockIdx) ? 
(SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL; continue; _commit_table_mem_and_disk: @@ -938,7 +949,7 @@ static int32_t tsdbCommitFileData(SCommitter *pCommitter) { if (code) goto _err; iBlockIdx++; - pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->aBlockIdx, iBlockIdx) : NULL; + pBlockIdx = (iBlockIdx < nBlockIdx) ? (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, iBlockIdx) : NULL; iTbData++; pTbData = (iTbData < nTbData) ? (STbData *)taosArrayGetP(pMemTable->aTbData, iTbData) : NULL; continue; @@ -952,8 +963,8 @@ static int32_t tsdbCommitFileData(SCommitter *pCommitter) { _err: tsdbError("vgId:%d commit file data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); - tsdbDataFReaderClose(&pCommitter->pReader); - tsdbDataFWriterClose(&pCommitter->pWriter, 0); + tsdbDataFReaderClose(&pCommitter->dReader.pReader); + tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 0); return code; } @@ -990,22 +1001,22 @@ _err: static int32_t tsdbCommitDataStart(SCommitter *pCommitter) { int32_t code = 0; - pCommitter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); - if (pCommitter->aBlockIdx == NULL) { + pCommitter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); + if (pCommitter->dReader.aBlockIdx == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - pCommitter->aBlockIdxN = taosArrayInit(0, sizeof(SBlockIdx)); - if (pCommitter->aBlockIdxN == NULL) { + pCommitter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx)); + if (pCommitter->dWriter.aBlockIdx == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; goto _exit; } - code = tBlockDataInit(&pCommitter->oBlockData); + code = tBlockDataInit(&pCommitter->dReader.bData); if (code) goto _exit; - code = tBlockDataInit(&pCommitter->nBlockData); + code = tBlockDataInit(&pCommitter->dWriter.bData); if (code) goto _exit; _exit: @@ -1013,12 +1024,12 @@ _exit: } static void tsdbCommitDataEnd(SCommitter *pCommitter) { - taosArrayDestroy(pCommitter->aBlockIdx); - tMapDataClear(&pCommitter->oBlockMap); - tBlockDataClear(&pCommitter->oBlockData, 1); - taosArrayDestroy(pCommitter->aBlockIdxN); - tMapDataClear(&pCommitter->nBlockMap); - tBlockDataClear(&pCommitter->nBlockData, 1); + taosArrayDestroy(pCommitter->dReader.aBlockIdx); + tMapDataClear(&pCommitter->dReader.mBlock); + tBlockDataClear(&pCommitter->dReader.bData, 1); + taosArrayDestroy(pCommitter->dWriter.aBlockIdx); + tMapDataClear(&pCommitter->dWriter.mBlock); + tBlockDataClear(&pCommitter->dWriter.bData, 1); tTSchemaDestroy(pCommitter->skmTable.pTSchema); tTSchemaDestroy(pCommitter->skmRow.pTSchema); } diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c index 0b355d91b461d121be0359f7973d3fbe13e838c7..3c832e9c2a4b7b7a2fcaf0174786297e06e31b31 100644 --- a/source/dnode/vnode/src/tsdb/tsdbOpen.c +++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c @@ -15,11 +15,8 @@ #include "tsdb.h" -static int tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg); - -// implementation - -static int tsdbSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg) { +int32_t tsdbSetKeepCfg(STsdb *pTsdb, STsdbCfg *pCfg) { + STsdbKeepCfg *pKeepCfg = &pTsdb->keepCfg; pKeepCfg->precision = pCfg->precision; pKeepCfg->days = pCfg->days; pKeepCfg->keep0 = pCfg->keep0; @@ -56,7 +53,7 @@ int tsdbOpen(SVnode *pVnode, STsdb **ppTsdb, const char *dir, STsdbKeepCfg *pKee pTsdb->pVnode = pVnode; taosThreadRwlockInit(&pTsdb->rwLock, NULL); if (!pKeepCfg) { - tsdbSetKeepCfg(&pTsdb->keepCfg, &pVnode->config.tsdbCfg); + tsdbSetKeepCfg(pTsdb, 
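/* tsdbSetKeepCfg is now exported and fills pTsdb->keepCfg in place from the vnode-level STsdbCfg; callers that pass an explicit pKeepCfg still take the memcpy branch below. */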
&pVnode->config.tsdbCfg); } else { memcpy(&pTsdb->keepCfg, pKeepCfg, sizeof(STsdbKeepCfg)); } diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c index 89b270a540e0a39e852bbc36ec88d364b9a9cf79..8e9a726c7cfaa21f36fcb60e4616a0baa87fbe33 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead.c @@ -80,15 +80,15 @@ typedef struct SFilesetIter { typedef struct SFileDataBlockInfo { // index position in STableBlockScanInfo in order to check whether neighbor block overlaps with it uint64_t uid; - int32_t tbBlockIdx; + int32_t tbBlockIdx; } SFileDataBlockInfo; typedef struct SDataBlockIter { - int32_t numOfBlocks; - int32_t index; - SArray* blockList; // SArray - int32_t order; - SBlock block; // current SBlock data + int32_t numOfBlocks; + int32_t index; + SArray* blockList; // SArray + int32_t order; + SBlock block; // current SBlock data SHashObj* pTableMap; } SDataBlockIter; @@ -124,8 +124,8 @@ struct STsdbReader { SSDataBlock* pResBlock; int32_t capacity; SReaderStatus status; - char* idStr; // query info handle, for debug purpose - int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows + char* idStr; // query info handle, for debug purpose + int32_t type; // query type: 1. retrieve all data blocks, 2. retrieve direct prev|next rows SBlockLoadSuppInfo suppInfo; STsdbReadSnap* pReadSnap; SIOCostSummary cost; @@ -133,8 +133,8 @@ struct STsdbReader { SDataFReader* pFileReader; SVersionRange verRange; - int32_t step; - STsdbReader* innerReader[2]; + int32_t step; + STsdbReader* innerReader[2]; }; static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter); @@ -143,7 +143,7 @@ static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, i static TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader); static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader, SRowMerger* pMerger); -static int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMerger* pMerger, +static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger, STsdbReader* pReader); static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow); static int32_t doAppendRowFromBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex); @@ -212,8 +212,8 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK pTsdbReader->idStr); } - tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, %s", pTsdbReader, numOfTables, (sizeof(STableBlockScanInfo)*numOfTables)/1024.0, - pTsdbReader->idStr); + tsdbDebug("%p create %d tables scan-info, size:%.2f Kb, %s", pTsdbReader, numOfTables, + (sizeof(STableBlockScanInfo) * numOfTables) / 1024.0, pTsdbReader->idStr); return pTableMap; } @@ -397,7 +397,8 @@ static SSDataBlock* createResBlock(SQueryTableDataCond* pCond, int32_t capacity) return pResBlock; } -static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity, const char* idstr) { +static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity, + const char* idstr) { int32_t code = 0; int8_t level = 0; STsdbReader* pReader = (STsdbReader*)taosMemoryCalloc(1, sizeof(*pReader)); @@ -577,7 +578,7 @@ static int32_t doLoadBlockIndex(STsdbReader* 
pReader, SDataFReader* pFileReader, int64_t et2 = taosGetTimestampUs(); tsdbDebug("load block index for %d tables completed, elapsed time:%.2f ms, set blockIdx:%.2f ms, size:%.2f Kb %s", - (int32_t)num, (et1 - st)/1000.0, (et2-et1)/1000.0, num * sizeof(SBlockIdx)/1024.0, pReader->idStr); + (int32_t)num, (et1 - st) / 1000.0, (et2 - et1) / 1000.0, num * sizeof(SBlockIdx) / 1024.0, pReader->idStr); pReader->cost.headFileLoadTime += (et1 - st) / 1000.0; @@ -592,7 +593,7 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_ *numOfValidTables = 0; int64_t st = taosGetTimestampUs(); - size_t size = 0; + size_t size = 0; STableBlockScanInfo* px = NULL; while (1) { @@ -642,9 +643,9 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, uint32_ } } - double el = (taosGetTimestampUs() - st)/1000.0; + double el = (taosGetTimestampUs() - st) / 1000.0; tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, size:%.2f Kb, elapsed time:%.2f ms %s", - numOfTables, *numOfBlocks, *numOfValidTables, size/1000.0, el, pReader->idStr); + numOfTables, *numOfBlocks, *numOfValidTables, size / 1000.0, el, pReader->idStr); pReader->cost.numOfBlocks += (*numOfBlocks); pReader->cost.headFileLoadTime += el; @@ -680,9 +681,7 @@ static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter) { return pFBlockInfo; } -static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) { - return &pBlockIter->block; -} +static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; } static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) { SReaderStatus* pStatus = &pReader->status; @@ -690,7 +689,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn SBlockData* pBlockData = &pStatus->fileBlockData; SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - SBlock* pBlock = getCurrentBlock(pBlockIter); + SBlock* pBlock = getCurrentBlock(pBlockIter); SSDataBlock* pResBlock = pReader->pResBlock; int32_t numOfOutputCols = blockDataGetNumOfCols(pResBlock); @@ -772,17 +771,17 @@ static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockI int64_t st = taosGetTimestampUs(); SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); - SBlock* pBlock = getCurrentBlock(pBlockIter); + SBlock* pBlock = getCurrentBlock(pBlockIter); - SSDataBlock* pResBlock = pReader->pResBlock; - int32_t numOfCols = blockDataGetNumOfCols(pResBlock); + SSDataBlock* pResBlock = pReader->pResBlock; + int32_t numOfCols = blockDataGetNumOfCols(pResBlock); SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo; SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; - SBlockIdx blockIdx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid}; - int32_t code = tsdbReadColData(pReader->pFileReader, &blockIdx, pBlock, pSupInfo->colIds, numOfCols, - pBlockData, NULL, NULL); + SBlockIdx blockIdx = {.suid = pReader->suid, .uid = pBlockScanInfo->uid}; + int32_t code = + tsdbReadColData(pReader->pFileReader, &blockIdx, pBlock, pSupInfo->colIds, numOfCols, pBlockData, NULL, NULL); if (code != TSDB_CODE_SUCCESS) { goto _error; } @@ -857,7 +856,7 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v } static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) { - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); + SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter); STableBlockScanInfo* pScanInfo = 
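/* resolve the per-table scan info by uid, then load the SBlock at tbBlockIdx from that table's mapData into pBlockIter->block, which getCurrentBlock() above returns directly */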
taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid)); int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx); @@ -882,7 +881,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte int64_t st = taosGetTimestampUs(); SBlockOrderSupporter sup = {0}; - int32_t code = initBlockOrderSupporter(&sup, numOfTables); + int32_t code = initBlockOrderSupporter(&sup, numOfTables); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -937,8 +936,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte } int64_t et = taosGetTimestampUs(); - tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s", pReader, cnt, - (et - st)/1000.0, pReader->idStr); + tsdbDebug("%p create blocks info struct completed for one table, %d blocks not sorted, elapsed time:%.2f ms %s", + pReader, cnt, (et - st) / 1000.0, pReader->idStr); pBlockIter->index = asc ? 0 : (numOfBlocks - 1); cleanupBlockOrderSupporter(&sup); @@ -976,7 +975,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte } int64_t et = taosGetTimestampUs(); - tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et-st)/1000.0, pReader->idStr); + tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, cnt, (et - st) / 1000.0, + pReader->idStr); cleanupBlockOrderSupporter(&sup); taosMemoryFree(pTree); @@ -1024,7 +1024,7 @@ static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STab int32_t step = asc ? 1 : -1; *nextIndex = pFBlockInfo->tbBlockIdx + step; - SBlock *pBlock = taosMemoryCalloc(1, sizeof(SBlock)); + SBlock* pBlock = taosMemoryCalloc(1, sizeof(SBlock)); int32_t* indexInMapdata = taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex); tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, *indexInMapdata, pBlock, tGetBlock); @@ -1196,10 +1196,10 @@ static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* setComposedBlockFlag(pReader, true); double elapsedTime = (taosGetTimestampUs() - st) / 1000.0; - tsdbDebug( - "%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange: %" PRId64 - " - %" PRId64 " %s", - pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, pReader->idStr); + tsdbDebug("%p build data block from cache completed, elapsed time:%.2f ms, numOfRows:%d, brange: %" PRId64 + " - %" PRId64 " %s", + pReader, elapsedTime, pBlock->info.rows, pBlock->info.window.skey, pBlock->info.window.ekey, + pReader->idStr); pReader->cost.buildmemBlock += elapsedTime; return code; @@ -1230,7 +1230,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); tRowMerge(&merge, pRow); - doMergeRowsInBuf(pIter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); tRowMergerGetRow(&merge, &pTSRow); } @@ -1246,7 +1246,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* updateSchema(pRow, pBlockScanInfo->uid, pReader); tRowMergerInit(&merge, pRow, pReader->pSchema); - doMergeRowsInBuf(pIter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); tRowMerge(&merge, &fRow); 
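/* doMergeRowsInBuf now also takes the table uid so it can fetch the exact schema version of each buffered row via metaGetTbTSchemaEx (see the hunk below) instead of assuming the reader's cached schema. */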
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge); @@ -1291,12 +1291,12 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* if (ik.ts == key) { tRowMerge(&merge, piRow); - doMergeRowsInBuf(&pBlockScanInfo->iiter, key, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iiter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader); } if (k.ts == key) { tRowMerge(&merge, pRow); - doMergeRowsInBuf(&pBlockScanInfo->iter, key, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader); } tRowMergerGetRow(&merge, &pTSRow); @@ -1336,11 +1336,11 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* updateSchema(pRow, uid, pReader); tRowMergerInit(&merge, pRow, pReader->pSchema); - doMergeRowsInBuf(&pBlockScanInfo->iter, key, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader); if (ik.ts == k.ts) { tRowMerge(&merge, piRow); - doMergeRowsInBuf(&pBlockScanInfo->iiter, key, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iiter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader); } if (k.ts == key) { @@ -1514,9 +1514,10 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader, STableBlockScanInfo* setComposedBlockFlag(pReader, true); int64_t et = taosGetTimestampUs(); - tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, elapsed time:%.2f ms %s", pReader, - pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows, - (et - st)/1000.0, pReader->idStr); + tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 + " rows:%d, elapsed time:%.2f ms %s", + pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, + pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr); return TSDB_CODE_SUCCESS; } @@ -1694,7 +1695,7 @@ static TSDBKEY getCurrentKeyInBuf(SDataBlockIter* pBlockIter, STsdbReader* pRead static int32_t moveToNextFile(STsdbReader* pReader, int32_t* numOfBlocks) { SReaderStatus* pStatus = &pReader->status; - size_t numOfTables = taosHashGetSize(pReader->status.pTableMap); + size_t numOfTables = taosHashGetSize(pReader->status.pTableMap); SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx)); while (1) { @@ -2111,7 +2112,8 @@ TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pRea } } -int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMerger* pMerger, STsdbReader* pReader) { +int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger, + STsdbReader* pReader) { while (1) { pIter->hasVal = tsdbTbDataIterNext(pIter->iter); if (!pIter->hasVal) { @@ -2130,7 +2132,19 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, int64_t ts, SArray* pDelList, SRowMer break; } - tRowMerge(pMerger, pRow); + int32_t sversion = TSDBROW_SVERSION(pRow); + STSchema* pTSchema = NULL; + if (sversion != pReader->pSchema->version) { + metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema); + } else { + pTSchema = pReader->pSchema; + } + + tRowMergerAdd(pMerger, pRow, pTSchema); + + if (sversion != pReader->pSchema->version) { + taosMemoryFree(pTSchema); + } } return TSDB_CODE_SUCCESS; @@ -2227,7 +2241,7 @@ int32_t 
doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc CHECK_FILEBLOCK_STATE st; SFileDataBlockInfo* pFileBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); - SBlock* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter); + SBlock* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter); checkForNeighborFileBlock(pReader, pScanInfo, pCurrentBlock, pFileBlockInfo, pMerger, key, &st); if (st == CHECK_FILEBLOCK_QUIT) { break; @@ -2254,12 +2268,23 @@ void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDe SRowMerger merge = {0}; TSDBKEY k = TSDBROW_KEY(pRow); - updateSchema(pRow, uid, pReader); + // updateSchema(pRow, uid, pReader); + int32_t sversion = TSDBROW_SVERSION(pRow); + STSchema* pTSchema = NULL; + if (sversion != pReader->pSchema->version) { + metaGetTbTSchemaEx(pReader->pTsdb->pVnode->pMeta, pReader->suid, uid, sversion, &pTSchema); + } else { + pTSchema = pReader->pSchema; + } - tRowMergerInit(&merge, pRow, pReader->pSchema); - doMergeRowsInBuf(pIter, k.ts, pDelList, &merge, pReader); + tRowMergerInit2(&merge, pReader->pSchema, pRow, pTSchema); + doMergeRowsInBuf(pIter, uid, k.ts, pDelList, &merge, pReader); tRowMergerGetRow(&merge, pTSRow); tRowMergerClear(&merge); + + if (sversion != pReader->pSchema->version) { + taosMemoryFree(pTSchema); + } } void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, @@ -2273,18 +2298,18 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo updateSchema(piRow, pBlockScanInfo->uid, pReader); tRowMergerInit(&merge, piRow, pReader->pSchema); - doMergeRowsInBuf(&pBlockScanInfo->iiter, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, ik.ts, pBlockScanInfo->delSkyline, &merge, pReader); tRowMerge(&merge, pRow); - doMergeRowsInBuf(&pBlockScanInfo->iter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); } else { updateSchema(pRow, pBlockScanInfo->uid, pReader); tRowMergerInit(&merge, pRow, pReader->pSchema); - doMergeRowsInBuf(&pBlockScanInfo->iter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); tRowMerge(&merge, piRow); - doMergeRowsInBuf(&pBlockScanInfo->iiter, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); + doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader); } tRowMergerGetRow(&merge, pTSRow); @@ -2477,6 +2502,10 @@ void* tsdbGetIvtIdx(SMeta* pMeta) { return metaGetIvtIdx(pMeta); } +uint64_t getReaderMaxVersion(STsdbReader *pReader) { + return pReader->verRange.maxVer; +} + /** * @brief Get all suids since suid * @@ -2522,7 +2551,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl if (pCond->type == TIMEWINDOW_RANGE_EXTERNAL) { // update the SQueryTableDataCond to create inner reader STimeWindow w = pCond->twindows; - int32_t order = pCond->order; + int32_t order = pCond->order; if (order == TSDB_ORDER_ASC) { pCond->twindows.ekey = pCond->twindows.skey; pCond->twindows.skey = INT64_MIN; @@ -2541,7 +2570,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl if (order == TSDB_ORDER_ASC) { pCond->twindows.skey = w.ekey; pCond->twindows.ekey = INT64_MAX; - } else { + } else { 
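/* descending scan: the follow-up inner reader covers everything up to w.ekey, mirroring the ascending case above */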
pCond->twindows.skey = INT64_MIN; pCond->twindows.ekey = w.ekey; } @@ -2589,10 +2618,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl } } } else { - STsdbReader* pPrevReader = pReader->innerReader[0]; + STsdbReader* pPrevReader = pReader->innerReader[0]; SDataBlockIter* pBlockIter = &pPrevReader->status.blockIter; - initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order, pPrevReader->idStr); + initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader->order, + pPrevReader->idStr); resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap); // no data in files, let's try buffer in memory @@ -2647,12 +2677,14 @@ void tsdbReaderClose(STsdbReader* pReader) { SIOCostSummary* pCost = &pReader->cost; - tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%"PRId64" SMA-time:%.2f ms, " - "fileBlocks:%"PRId64", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo " - "size:%.2f Kb %s", + tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64 + " SMA-time:%.2f ms, " + "fileBlocks:%" PRId64 + ", fileBlocks-time:%.2f ms, build in-memory-block-time:%.2f ms, STableBlockScanInfo " + "size:%.2f Kb %s", pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaData, pCost->smaLoadTime, pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock, - numOfTables * sizeof(STableBlockScanInfo) /1000.0, pReader->idStr); + numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr); taosMemoryFree(pReader->idStr); taosMemoryFree(pReader->pSchema); @@ -2731,9 +2763,9 @@ static void setBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) { void tsdbRetrieveDataBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) { if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) { - if (pReader->step == EXTERNAL_ROWS_MAIN) { + if (pReader->step == EXTERNAL_ROWS_MAIN) { setBlockInfo(pReader, pDataBlockInfo); - } else if (pReader->step == EXTERNAL_ROWS_PREV) { + } else if (pReader->step == EXTERNAL_ROWS_PREV) { setBlockInfo(pReader->innerReader[0], pDataBlockInfo); } else { setBlockInfo(pReader->innerReader[1], pDataBlockInfo); @@ -2747,7 +2779,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS int32_t code = 0; *allHave = false; - if(pReader->type == TIMEWINDOW_RANGE_EXTERNAL) { + if (pReader->type == TIMEWINDOW_RANGE_EXTERNAL) { *pBlockStatis = NULL; return TSDB_CODE_SUCCESS; } @@ -2758,7 +2790,7 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS return TSDB_CODE_SUCCESS; } - SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter); + SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter); SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter); int64_t stime = taosGetTimestampUs(); @@ -2863,7 +2895,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { } pReader->order = pCond->order; - pReader->type = TIMEWINDOW_RANGE_CONTAINED; + pReader->type = TIMEWINDOW_RANGE_CONTAINED; pReader->status.loadFromFile = true; pReader->status.pTableIter = NULL; pReader->window = updateQueryTimeWindow(pReader->pTsdb, &pCond->twindows); @@ -2882,7 +2914,7 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { resetDataBlockIterator(&pReader->status.blockIter, pReader->order, 
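/* tsdbReaderReset rewinds the fileset and block iterators against the updated query window, then re-primes the first file block or falls back to the in-memory buffer. */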
pReader->status.pTableMap); resetDataBlockScanInfo(pReader->status.pTableMap); - int32_t code = 0; + int32_t code = 0; SDataBlockIter* pBlockIter = &pReader->status.blockIter; // no data in files, let's try buffer in memory @@ -2891,8 +2923,8 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) { } else { code = initForFirstBlockInFile(pReader, pBlockIter); if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s", - pReader, numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr); + tsdbError("%p reset reader failed, numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s", pReader, + numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr); return code; } } diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 51d7edcf71597e0829cb62660e5caf02e8c8008d..6bb2b8c253ff6a8a153f0194a9319f94513c3480 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -21,6 +21,7 @@ struct STsdbSnapReader { int64_t sver; int64_t ever; STsdbFS fs; + int8_t type; // for data file int8_t dataDone; int32_t fid; @@ -62,7 +63,8 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) { pReader->iBlockIdx = 0; pReader->pBlockIdx = NULL; - tsdbInfo("vgId:%d vnode snapshot tsdb open data file to read, fid:%d", TD_VID(pTsdb->pVnode), pReader->fid); + tsdbInfo("vgId:%d vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path, + pReader->fid); } while (true) { @@ -130,7 +132,7 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) { } SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData); - pHdr->type = 1; + pHdr->type = pReader->type; pHdr->size = size; TABLEID* pId = (TABLEID*)(&pHdr[1]); @@ -139,9 +141,9 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) { tPutBlockData((uint8_t*)(&pId[1]), &pReader->nBlockData); - tsdbInfo("vgId:%d vnode snapshot read data, fid:%d suid:%" PRId64 " uid:%" PRId64 + tsdbInfo("vgId:%d vnode snapshot read data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64 " iBlock:%d minVersion:%d maxVersion:%d nRow:%d out of %d size:%d", - TD_VID(pTsdb->pVnode), pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid, + TD_VID(pTsdb->pVnode), pTsdb->path, pReader->fid, pReader->pBlockIdx->suid, pReader->pBlockIdx->uid, pReader->iBlock - 1, pBlock->minVersion, pBlock->maxVersion, pReader->nBlockData.nRow, pBlock->nRow, size); @@ -154,7 +156,8 @@ _exit: return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb read data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb read data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, + tstrerror(code)); return code; } @@ -212,7 +215,7 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) { } SSnapDataHdr* pHdr = (SSnapDataHdr*)(*ppData); - pHdr->type = 2; + pHdr->type = SNAP_DATA_DEL; pHdr->size = size; TABLEID* pId = (TABLEID*)(&pHdr[1]); @@ -228,8 +231,8 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) { n += tPutDelData((*ppData) + n, pDelData); } - tsdbInfo("vgId:%d vnode snapshot tsdb read del data, suid:%" PRId64 " uid:%d" PRId64 " size:%d", - TD_VID(pTsdb->pVnode), pDelIdx->suid, pDelIdx->uid, size); + tsdbInfo("vgId:%d vnode snapshot tsdb read del data for %s, suid:%" PRId64 " uid:%" PRId64 " size:%d", + TD_VID(pTsdb->pVnode), pTsdb->path, pDelIdx->suid, pDelIdx->uid, size); break; }
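Side note, not part of the patch: the chunk that tsdbSnapReadDel assembles above is one flat buffer, a header followed by a table id and the packed tombstones. A minimal sketch of that framing; the field widths here are illustrative assumptions, not the authoritative TDengine definitions:

```c
#include <stdint.h>

/* Hedged sketch of one del-snapshot chunk as packed above:
 *   [header][table id][tombstone * n]                                  */
typedef struct {
  int8_t  type;  /* SNAP_DATA_DEL, set via pHdr->type above             */
  int64_t size;  /* total payload size computed before packing          */
} SnapHdrSketch;

typedef struct {
  int64_t suid;  /* super-table uid, from pDelIdx->suid                 */
  int64_t uid;   /* table uid, from pDelIdx->uid                        */
} SnapTableIdSketch;

/* The reader writes the header first, points just past it for the table
 * id ((TABLEID*)(&pHdr[1])), then appends each tombstone with
 * tPutDelData((*ppData) + n, pDelData), advancing n by the bytes put.  */
```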
PRId64 " size:%d", + TD_VID(pTsdb->pVnode), pTsdb->path, pDelIdx->suid, pDelIdx->uid, size); break; } @@ -238,11 +241,12 @@ _exit: return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb read del failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb read del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->pVnode, + tstrerror(code)); return code; } -int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapReader** ppReader) { +int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type, STsdbSnapReader** ppReader) { int32_t code = 0; STsdbSnapReader* pReader = NULL; @@ -255,6 +259,7 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapRe pReader->pTsdb = pTsdb; pReader->sver = sver; pReader->ever = ever; + pReader->type = type; code = taosThreadRwlockRdlock(&pTsdb->rwLock); if (code) { @@ -297,12 +302,13 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapRe goto _err; } - tsdbInfo("vgId:%d vnode snapshot tsdb reader opened", TD_VID(pTsdb->pVnode)); + tsdbInfo("vgId:%d vnode snapshot tsdb reader opened for %s", TD_VID(pTsdb->pVnode), pTsdb->path); *ppReader = pReader; return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb reader open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, + tstrerror(code)); *ppReader = NULL; return code; } @@ -327,7 +333,7 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) { tsdbFSUnref(pReader->pTsdb, &pReader->fs); - tsdbInfo("vgId:%d vnode snapshot tsdb reader closed", TD_VID(pReader->pTsdb->pVnode)); + tsdbInfo("vgId:%d vnode snapshot tsdb reader closed for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path); taosMemoryFree(pReader); *ppReader = NULL; @@ -368,10 +374,12 @@ int32_t tsdbSnapRead(STsdbSnapReader* pReader, uint8_t** ppData) { } _exit: + tsdbDebug("vgId:%d vnode snapshot tsdb read for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path); return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb read failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb read for %s failed since %s", TD_VID(pReader->pTsdb->pVnode), + pReader->pTsdb->path, tstrerror(code)); return code; } @@ -436,7 +444,8 @@ static int32_t tsdbSnapWriteAppendData(STsdbSnapWriter* pWriter, uint8_t* pData, return code; _err: - tsdbError("vgId:%d tsdb snapshot write append data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d tsdb snapshot write append data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), + pWriter->pTsdb->path, tstrerror(code)); return code; } @@ -522,9 +531,12 @@ static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) { } _exit: + tsdbInfo("vgId:%d tsdb snapshot write table data end for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path); return code; _err: + tsdbError("vgId:%d tsdb snapshot write table data end for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), + pWriter->pTsdb->path, tstrerror(code)); return code; } @@ -570,6 +582,8 @@ _exit: return code; _err: + tsdbError("vgId:%d tsdb snapshot move write table data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), + pWriter->pTsdb->path, tstrerror(code)); return code; } @@ -708,8 +722,8 @@ static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) { return code; _err: - 
tsdbError("vgId:%d vnode snapshot tsdb write table data impl failed since %s", TD_VID(pWriter->pTsdb->pVnode), - tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb write table data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), + pWriter->pTsdb->path, tstrerror(code)); return code; } @@ -794,11 +808,12 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) { if (code) goto _err; _exit: + tsdbDebug("vgId:%d vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path); return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb write data impl failed since %s", TD_VID(pWriter->pTsdb->pVnode), - tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb write data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), + pWriter->pTsdb->path, tstrerror(code)); return code; } @@ -833,11 +848,12 @@ static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) { } _exit: - tsdbInfo("vgId:%d vnode snapshot tsdb writer data end", TD_VID(pTsdb->pVnode)); + tsdbInfo("vgId:%d vnode snapshot tsdb writer data end for %s", TD_VID(pTsdb->pVnode), pTsdb->path); return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb writer data end failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb writer data end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, + tstrerror(code)); return code; } @@ -920,12 +936,13 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3 code = tsdbSnapWriteTableData(pWriter, id); if (code) goto _err; - tsdbInfo("vgId:%d vnode snapshot tsdb write data, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d", - TD_VID(pTsdb->pVnode), fid, id.suid, id.suid, pBlockData->nRow); + tsdbInfo("vgId:%d vnode snapshot tsdb write data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d", + TD_VID(pTsdb->pVnode), pTsdb->path, fid, id.suid, id.suid, pBlockData->nRow); return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb write data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb write data for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, + tstrerror(code)); return code; } @@ -1015,7 +1032,8 @@ _exit: return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb write del failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb write del for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, + tstrerror(code)); return code; } @@ -1056,11 +1074,12 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) { } _exit: - tsdbInfo("vgId:%d vnode snapshot tsdb write del end", TD_VID(pTsdb->pVnode)); + tsdbInfo("vgId:%d vnode snapshot tsdb write del for %s end", TD_VID(pTsdb->pVnode), pTsdb->path); return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb write del end failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb write del end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path, + tstrerror(code)); return code; } @@ -1127,10 +1146,12 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr } *ppWriter = pWriter; - return code; + tsdbInfo("vgId:%d tsdb snapshot writer open for %s succeed", TD_VID(pTsdb->pVnode), pTsdb->path); + return code; _err: - tsdbError("vgId:%d tsdb snapshot writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d tsdb snapshot writer open for %s failed since %s", 
@@ -1157,14 +1178,16 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) { if (code) goto _err; } + tsdbInfo("vgId:%d vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path); taosMemoryFree(pWriter); *ppWriter = NULL; - return code; _err: - tsdbError("vgId:%d vnode snapshot tsdb writer close failed since %s", TD_VID(pWriter->pTsdb->pVnode), - tstrerror(code)); + tsdbError("vgId:%d vnode snapshot tsdb writer close for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), + pWriter->pTsdb->path, tstrerror(code)); + taosMemoryFree(pWriter); + *ppWriter = NULL; return code; } @@ -1173,7 +1196,7 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) { SSnapDataHdr* pHdr = (SSnapDataHdr*)pData; // ts data - if (pHdr->type == 1) { + if (pHdr->type == SNAP_DATA_TSDB) { code = tsdbSnapWriteData(pWriter, pData, nData); if (code) goto _err; @@ -1186,15 +1209,17 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) } // del data - if (pHdr->type == 2) { + if (pHdr->type == SNAP_DATA_DEL) { code = tsdbSnapWriteDel(pWriter, pData, nData); if (code) goto _err; } _exit: + tsdbDebug("vgId:%d tsdb snapshot write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path); return code; _err: - tsdbError("vgId:%d tsdb snapshow write failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code)); + tsdbError("vgId:%d tsdb snapshot write for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path, + tstrerror(code)); return code; } diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c index 3e05b75dd010ec2336115efa8354e2892c07afa7..d612e9bb10146b8a9b8a2176cef61835e52d7725 100644 --- a/source/dnode/vnode/src/tsdb/tsdbUtil.c +++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c @@ -13,6 +13,7 @@ * along with this program. If not, see .
*/ +#include "tdataformat.h" #include "tsdb.h" // SMapData ======================================================================= @@ -151,26 +152,6 @@ int32_t tTABLEIDCmprFn(const void *p1, const void *p2) { return 0; } -// TSDBKEY ======================================================================= -int32_t tsdbKeyCmprFn(const void *p1, const void *p2) { - TSDBKEY *pKey1 = (TSDBKEY *)p1; - TSDBKEY *pKey2 = (TSDBKEY *)p2; - - if (pKey1->ts < pKey2->ts) { - return -1; - } else if (pKey1->ts > pKey2->ts) { - return 1; - } - - if (pKey1->version < pKey2->version) { - return -1; - } else if (pKey1->version > pKey2->version) { - return 1; - } - - return 0; -} - // TSDBKEY ====================================================== static FORCE_INLINE int32_t tPutTSDBKEY(uint8_t *p, TSDBKEY *pKey) { int32_t n = 0; @@ -568,6 +549,103 @@ SColVal *tRowIterNext(SRowIter *pIter) { } // SRowMerger ====================================================== + +int32_t tRowMergerInit2(SRowMerger *pMerger, STSchema *pResTSchema, TSDBROW *pRow, STSchema *pTSchema) { + int32_t code = 0; + TSDBKEY key = TSDBROW_KEY(pRow); + SColVal *pColVal = &(SColVal){0}; + STColumn *pTColumn; + int32_t iCol, jCol = 0; + + pMerger->pTSchema = pResTSchema; + pMerger->version = key.version; + + pMerger->pArray = taosArrayInit(pResTSchema->numOfCols, sizeof(SColVal)); + if (pMerger->pArray == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + // ts + pTColumn = &pTSchema->columns[jCol++]; + + ASSERT(pTColumn->type == TSDB_DATA_TYPE_TIMESTAMP); + + *pColVal = COL_VAL_VALUE(pTColumn->colId, pTColumn->type, (SValue){.ts = key.ts}); + if (taosArrayPush(pMerger->pArray, pColVal) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + + // other + for (iCol = 1; jCol < pTSchema->numOfCols && iCol < pResTSchema->numOfCols; ++iCol) { + pTColumn = &pResTSchema->columns[iCol]; + if (pTSchema->columns[jCol].colId < pTColumn->colId) { + ++jCol; + --iCol; + continue; + } else if (pTSchema->columns[jCol].colId > pTColumn->colId) { + taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type)); + continue; + } + + tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal); + if (taosArrayPush(pMerger->pArray, pColVal) == NULL) { + code = TSDB_CODE_OUT_OF_MEMORY; + goto _exit; + } + } + + for (; iCol < pResTSchema->numOfCols; ++iCol) { + pTColumn = &pResTSchema->columns[iCol]; + taosArrayPush(pMerger->pArray, &COL_VAL_NONE(pTColumn->colId, pTColumn->type)); + } + +_exit: + return code; +} + +int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) { + int32_t code = 0; + TSDBKEY key = TSDBROW_KEY(pRow); + SColVal *pColVal = &(SColVal){0}; + STColumn *pTColumn; + int32_t iCol, jCol = 1; + + ASSERT(((SColVal *)pMerger->pArray->pData)->value.ts == key.ts); + + for (iCol = 1; iCol < pMerger->pTSchema->numOfCols && jCol < pTSchema->numOfCols; ++iCol) { + pTColumn = &pMerger->pTSchema->columns[iCol]; + if (pTSchema->columns[jCol].colId < pTColumn->colId) { + ++jCol; + --iCol; + continue; + } else if (pTSchema->columns[jCol].colId > pTColumn->colId) { + continue; + } + + tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal); + + if (key.version > pMerger->version) { + if (!pColVal->isNone) { + taosArraySet(pMerger->pArray, iCol, pColVal); + } + } else if (key.version < pMerger->version) { + SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol); + if (tColVal->isNone && !pColVal->isNone) { + taosArraySet(pMerger->pArray, iCol, pColVal); + } + } else { + ASSERT(0); + } + } + + pMerger->version = 
int32_t tRowMergerInit(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) { int32_t code = 0; TSDBKEY key = TSDBROW_KEY(pRow); @@ -1401,7 +1479,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { break; case TSDB_DATA_TYPE_BOOL: break; - case TSDB_DATA_TYPE_TINYINT:{ + case TSDB_DATA_TYPE_TINYINT: { pColAgg->sum += colVal.value.i8; if (pColAgg->min > colVal.value.i8) { pColAgg->min = colVal.value.i8; @@ -1411,7 +1489,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_SMALLINT:{ + case TSDB_DATA_TYPE_SMALLINT: { pColAgg->sum += colVal.value.i16; if (pColAgg->min > colVal.value.i16) { pColAgg->min = colVal.value.i16; @@ -1441,7 +1519,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_FLOAT:{ + case TSDB_DATA_TYPE_FLOAT: { pColAgg->sum += colVal.value.f; if (pColAgg->min > colVal.value.f) { pColAgg->min = colVal.value.f; @@ -1451,7 +1529,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_DOUBLE:{ + case TSDB_DATA_TYPE_DOUBLE: { pColAgg->sum += colVal.value.d; if (pColAgg->min > colVal.value.d) { pColAgg->min = colVal.value.d; @@ -1463,7 +1541,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } case TSDB_DATA_TYPE_VARCHAR: break; - case TSDB_DATA_TYPE_TIMESTAMP:{ + case TSDB_DATA_TYPE_TIMESTAMP: { if (pColAgg->min > colVal.value.i64) { pColAgg->min = colVal.value.i64; } @@ -1474,7 +1552,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } case TSDB_DATA_TYPE_NCHAR: break; - case TSDB_DATA_TYPE_UTINYINT:{ + case TSDB_DATA_TYPE_UTINYINT: { pColAgg->sum += colVal.value.u8; if (pColAgg->min > colVal.value.u8) { pColAgg->min = colVal.value.u8; @@ -1484,7 +1562,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_USMALLINT:{ + case TSDB_DATA_TYPE_USMALLINT: { pColAgg->sum += colVal.value.u16; if (pColAgg->min > colVal.value.u16) { pColAgg->min = colVal.value.u16; @@ -1494,7 +1572,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_UINT:{ + case TSDB_DATA_TYPE_UINT: { pColAgg->sum += colVal.value.u32; if (pColAgg->min > colVal.value.u32) { pColAgg->min = colVal.value.u32; @@ -1504,7 +1582,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) { } break; } - case TSDB_DATA_TYPE_UBIGINT:{ + case TSDB_DATA_TYPE_UBIGINT: { pColAgg->sum += colVal.value.u64; if (pColAgg->min > colVal.value.u64) { pColAgg->min = colVal.value.u64; diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c index 3f8f81cb09a3ce270b9a1063dcb2bc0bd98e94e5..2bdd7fb513ad380f402ab1282798207551fe8447 100644 --- a/source/dnode/vnode/src/vnd/vnodeSnapshot.c +++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c @@ -27,8 +27,19 @@ struct SVSnapReader { // tsdb int8_t tsdbDone; STsdbSnapReader *pTsdbReader; + // tq + int8_t tqHandleDone; + STqSnapReader *pTqSnapReader; + int8_t tqOffsetDone; + STqOffsetReader *pTqOffsetReader; + // stream + int8_t streamTaskDone; + SStreamTaskReader *pStreamTaskReader; + int8_t streamStateDone; + SStreamStateReader *pStreamStateReader; // rsma - int8_t rsmaDone[TSDB_RETENTION_L2]; + int8_t rsmaDone; + SRsmaSnapReader *pRsmaReader; }; int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) { @@ -57,6 +68,10
@@ _err: int32_t vnodeSnapReaderClose(SVSnapReader *pReader) { int32_t code = 0; + if (pReader->pRsmaReader) { + rsmaSnapReaderClose(&pReader->pRsmaReader); + } + if (pReader->pTsdbReader) { tsdbSnapReaderClose(&pReader->pTsdbReader); } @@ -99,7 +114,8 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData) if (!pReader->tsdbDone) { // open if not if (pReader->pTsdbReader == NULL) { - code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, &pReader->pTsdbReader); + code = tsdbSnapReaderOpen(pReader->pVnode->pTsdb, pReader->sver, pReader->ever, SNAP_DATA_TSDB, + &pReader->pTsdbReader); if (code) goto _err; } @@ -117,41 +133,73 @@ int32_t vnodeSnapRead(SVSnapReader *pReader, uint8_t **ppData, uint32_t *nData) } } + // TQ ================ + if (!pReader->tqHandleDone) { + if (pReader->pTqSnapReader == NULL) { + code = tqSnapReaderOpen(pReader->pVnode->pTq, pReader->sver, pReader->ever, &pReader->pTqSnapReader); + if (code < 0) goto _err; + } + + code = tqSnapRead(pReader->pTqSnapReader, ppData); + if (code) { + goto _err; + } else { + if (*ppData) { + goto _exit; + } else { + pReader->tqHandleDone = 1; + code = tqSnapReaderClose(&pReader->pTqSnapReader); + if (code) goto _err; + } + } + } + if (!pReader->tqOffsetDone) { + if (pReader->pTqOffsetReader == NULL) { + code = tqOffsetReaderOpen(pReader->pVnode->pTq, pReader->sver, pReader->ever, &pReader->pTqOffsetReader); + if (code < 0) goto _err; + } + + code = tqOffsetSnapRead(pReader->pTqOffsetReader, ppData); + if (code) { + goto _err; + } else { + if (*ppData) { + goto _exit; + } else { + pReader->tqOffsetDone = 1; + code = tqOffsetReaderClose(&pReader->pTqOffsetReader); + if (code) goto _err; + } + } + } + + // STREAM ============ + if (!pReader->streamTaskDone) { + } + if (!pReader->streamStateDone) { + } + // RSMA ============== -#if 0 - if (VND_IS_RSMA(pReader->pVnode)) { - // RSMA1/RSMA2 - for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { - if (!pReader->rsmaDone[i]) { - if (!pReader->pVnode->pSma->pRSmaTsdb[i]) { - // no valid tsdb - pReader->rsmaDone[i] = 1; - continue; - } - if (pReader->pTsdbReader == NULL) { - code = tsdbSnapReaderOpen(pReader->pVnode->pSma->pRSmaTsdb[i], pReader->sver, pReader->ever, - &pReader->pTsdbReader); - if (code) goto _err; - } - - code = tsdbSnapRead(pReader->pTsdbReader, ppData); - if (code) { - goto _err; - } else { - if (*ppData) { - goto _exit; - } else { - pReader->tsdbDone = 1; - code = tsdbSnapReaderClose(&pReader->pTsdbReader); - if (code) goto _err; - } - } + if (VND_IS_RSMA(pReader->pVnode) && !pReader->rsmaDone) { + // open if not + if (pReader->pRsmaReader == NULL) { + code = rsmaSnapReaderOpen(pReader->pVnode->pSma, pReader->sver, pReader->ever, &pReader->pRsmaReader); + if (code) goto _err; + } + + code = rsmaSnapRead(pReader->pRsmaReader, ppData); + if (code) { + goto _err; + } else { + if (*ppData) { + goto _exit; + } else { + pReader->rsmaDone = 1; + code = rsmaSnapReaderClose(&pReader->pRsmaReader); + if (code) goto _err; } } - // QTaskInfoFile - // TODO ... } -#endif *ppData = NULL; *nData = 0;
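Every source in vnodeSnapRead repeats the same step, which the rsma block above now shares with meta, tsdb, and tq: open the sub-reader lazily, hand back the next chunk if there is one, otherwise close the sub-reader and mark that source done. A generic sketch of one such step; the typedef names are illustrative, standing in for pairs like rsmaSnapReaderOpen/rsmaSnapRead/rsmaSnapReaderClose:

```c
#include <stddef.h>
#include <stdint.h>

typedef int32_t (*snapOpenFn)(void **ppReader);
typedef int32_t (*snapReadFn)(void *pReader, uint8_t **ppData);
typedef int32_t (*snapCloseFn)(void **ppReader);

/* One scheduling step for a single snapshot source. `done` must be the
 * flag that belongs to this source (rsmaDone for rsma, tqOffsetDone for
 * tq offsets, and so on), or the source gets re-read on every call.   */
static int32_t snapStep(int8_t *done, void **ppReader, uint8_t **ppData,
                        snapOpenFn open, snapReadFn read, snapCloseFn close) {
  if (*done) return 0; /* source already drained */
  int32_t code = 0;
  if (*ppReader == NULL && (code = open(ppReader)) != 0) return code;
  if ((code = read(*ppReader, ppData)) != 0) return code;
  if (*ppData != NULL) return 0; /* caller ships this chunk (_exit) */
  *done = 1;                     /* drained: mark and close          */
  return close(ppReader);
}
```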
@@ -186,6 +234,14 @@ struct SVSnapWriter { SMetaSnapWriter *pMetaSnapWriter; // tsdb STsdbSnapWriter *pTsdbSnapWriter; + // tq + STqSnapWriter *pTqSnapWriter; + STqOffsetWriter *pTqOffsetWriter; + // stream + SStreamTaskWriter *pStreamTaskWriter; + SStreamStateWriter *pStreamStateWriter; + // rsma + SRsmaSnapWriter *pRsmaSnapWriter; }; int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) { @@ -235,6 +291,11 @@ int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot * if (code) goto _err; } + if (pWriter->pRsmaSnapWriter) { + code = rsmaSnapWriterClose(&pWriter->pRsmaSnapWriter, rollback); + if (code) goto _err; + } + if (!rollback) { SVnodeInfo info = {0}; char dir[TSDB_FILENAME_LEN]; @@ -282,28 +343,59 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) { vInfo("vgId:%d vnode snapshot write data, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode), pHdr->index, pHdr->type, nData); - if (pHdr->type == 0) { - // meta + switch (pHdr->type) { + case SNAP_DATA_META: { + // meta + if (pWriter->pMetaSnapWriter == NULL) { + code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter); + if (code) goto _err; + } - if (pWriter->pMetaSnapWriter == NULL) { - code = metaSnapWriterOpen(pVnode->pMeta, pWriter->sver, pWriter->ever, &pWriter->pMetaSnapWriter); + code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData); if (code) goto _err; - } + } break; + case SNAP_DATA_TSDB: { + // tsdb + if (pWriter->pTsdbSnapWriter == NULL) { + code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter); + if (code) goto _err; + } - code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData); - if (code) goto _err; - } else { - // tsdb + code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData); + if (code) goto _err; + } break; + case SNAP_DATA_TQ_HANDLE: { + } break; + case SNAP_DATA_TQ_OFFSET: { + } break; + case SNAP_DATA_STREAM_TASK: { + } break; + case SNAP_DATA_STREAM_STATE: { + } break; + case SNAP_DATA_RSMA1: + case SNAP_DATA_RSMA2: { + // rsma1/rsma2 + if (pWriter->pRsmaSnapWriter == NULL) { + code = rsmaSnapWriterOpen(pVnode->pSma, pWriter->sver, pWriter->ever, &pWriter->pRsmaSnapWriter); + if (code) goto _err; + } - if (pWriter->pTsdbSnapWriter == NULL) { - code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter); + code = rsmaSnapWrite(pWriter->pRsmaSnapWriter, pData, nData); if (code) goto _err; - } + } break; + case SNAP_DATA_QTASK: { + // qtask for rsma + if (pWriter->pRsmaSnapWriter == NULL) { + code = rsmaSnapWriterOpen(pVnode->pSma, pWriter->sver, pWriter->ever, &pWriter->pRsmaSnapWriter); + if (code) goto _err; + } - code = tsdbSnapWrite(pWriter->pTsdbSnapWriter, pData, nData); - if (code) goto _err; + code = rsmaSnapWrite(pWriter->pRsmaSnapWriter, pData, nData); + if (code) goto _err; + } break; + default: + break; } - _exit: return code; @@ -311,4 +403,4 @@ _err: vError("vgId:%d vnode snapshot write failed since %s, index:%" PRId64 " type:%d nData:%d", TD_VID(pVnode), tstrerror(code), pHdr->index, pHdr->type, nData); return code; -} \ No newline at end of file +} diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index a83e1ab85b0b0f7172eafadf0f94ebc3777102ca..8aa3a7d296723645e05591a053af6542b4a7373b 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -297,8
+297,8 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) { int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { vTrace("message in fetch queue is processing"); - if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || - pMsg->msgType == TDMT_VND_TABLE_CFG || pMsg->msgType == TDMT_VND_BATCH_META) && + if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG || + pMsg->msgType == TDMT_VND_BATCH_META) && !vnodeIsLeader(pVnode)) { vnodeRedirectRpcMsg(pVnode, pMsg); return 0; @@ -447,6 +447,7 @@ _err: static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) { SDecoder decoder = {0}; + SEncoder encoder = {0}; int32_t rcode = 0; SVCreateTbBatchReq req = {0}; SVCreateTbReq *pCreateReq; @@ -485,7 +486,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR rcode = -1; goto _exit; } - + // validate hash sprintf(tbName, "%s.%s", pVnode->config.dbname, pCreateReq->name); if (vnodeValidateTableHash(pVnode, tbName) < 0) { @@ -515,8 +516,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR tdUidStoreFree(pStore); // prepare rsp - SEncoder encoder = {0}; - int32_t ret = 0; + int32_t ret = 0; tEncodeSize(tEncodeSVCreateTbBatchRsp, &rsp, pRsp->contLen, ret); pRsp->pCont = rpcMallocCont(pRsp->contLen); if (pRsp->pCont == NULL) { @@ -977,6 +977,9 @@ static int32_t vnodeProcessAlterHashRangeReq(SVnode *pVnode, int64_t version, vo static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) { SAlterVnodeReq alterReq = {0}; + bool walChanged = false; + bool tsdbChanged = false; + if (tDeserializeSAlterVnodeReq(pReq, len, &alterReq) != 0) { terrno = TSDB_CODE_INVALID_MSG; return TSDB_CODE_INVALID_MSG; @@ -986,9 +989,54 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void alterReq.cacheLastSize); if (pVnode->config.cacheLastSize != alterReq.cacheLastSize) { pVnode->config.cacheLastSize = alterReq.cacheLastSize; - // TODO: save config tsdbCacheSetCapacity(pVnode, (size_t)pVnode->config.cacheLastSize * 1024 * 1024); } + + if (pVnode->config.cacheLast != alterReq.cacheLast) { + pVnode->config.cacheLast = alterReq.cacheLast; + } + + if (pVnode->config.walCfg.fsyncPeriod != alterReq.walFsyncPeriod) { + pVnode->config.walCfg.fsyncPeriod = alterReq.walFsyncPeriod; + + walChanged = true; + } + + if (pVnode->config.walCfg.level != alterReq.walLevel) { + pVnode->config.walCfg.level = alterReq.walLevel; + + walChanged = true; + } + + if (pVnode->config.tsdbCfg.keep0 != alterReq.daysToKeep0) { + pVnode->config.tsdbCfg.keep0 = alterReq.daysToKeep0; + if (!VND_IS_RSMA(pVnode)) { + tsdbChanged = true; + } + } + + if (pVnode->config.tsdbCfg.keep1 != alterReq.daysToKeep1) { + pVnode->config.tsdbCfg.keep1 = alterReq.daysToKeep1; + if (!VND_IS_RSMA(pVnode)) { + tsdbChanged = true; + } + } + + if (pVnode->config.tsdbCfg.keep2 != alterReq.daysToKeep2) { + pVnode->config.tsdbCfg.keep2 = alterReq.daysToKeep2; + if (!VND_IS_RSMA(pVnode)) { + tsdbChanged = true; + } + } + + if (walChanged) { + walAlter(pVnode->pWal, &pVnode->config.walCfg); + } + + if (tsdbChanged) { + tsdbSetKeepCfg(pVnode->pTsdb, &pVnode->config.tsdbCfg); + } + return 0; } @@ -1021,10 +1069,10 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq taosArrayDestroy(pRes->uidList); SVDeleteRsp rsp = {.affectedRows = 
pRes->affectedRows}; - int32_t ret = 0; + int32_t ret = 0; tEncodeSize(tEncodeSVDeleteRsp, &rsp, pRsp->contLen, ret); pRsp->pCont = rpcMallocCont(pRsp->contLen); - SEncoder ec = {0}; + SEncoder ec = {0}; tEncoderInit(&ec, pRsp->pCont, pRsp->contLen); tEncodeSVDeleteRsp(&ec, &rsp); tEncoderClear(&ec); diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 1aaa1ecfd723d573e885975fdd3dfd5afd4ba55f..5b5c6010e84794bee6aaa0f0fab2f8d7c4022210 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -679,6 +679,8 @@ void ctgClearSubTaskRes(SCtgSubRes *pRes); void ctgFreeQNode(SCtgQNode *node); void ctgClearHandle(SCatalog* pCtg); void ctgFreeTbCacheImpl(SCtgTbCache *pCache); +int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName); +int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup); extern SCatalogMgmt gCtgMgmt; diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c index 59f11898fa4f3d944223c027009bc9fc9d8216aa..933e65e582274711ad194d6a74ca5cbec682ef49 100644 --- a/source/libs/catalog/src/catalog.c +++ b/source/libs/catalog/src/catalog.c @@ -92,7 +92,7 @@ int32_t ctgRefreshTbMeta(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx* int32_t code = 0; if (!CTG_FLAG_IS_SYS_DB(ctx->flag)) { - CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo)); + CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, ctx->pName, &vgroupInfo)); } STableMetaOutput moutput = {0}; @@ -337,7 +337,10 @@ int32_t ctgGetTbType(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName, } STableMeta* pMeta = NULL; - CTG_ERR_RET(catalogGetTableMeta(pCtg, pConn, pTableName, &pMeta)); + SCtgTbMetaCtx ctx = {0}; + ctx.pName = (SName*)pTableName; + ctx.flag = CTG_FLAG_UNKNOWN_STB; + CTG_ERR_RET(ctgGetTbMeta(pCtg, pConn, &ctx, &pMeta)); *tbType = pMeta->tableType; taosMemoryFree(pMeta); @@ -391,7 +394,7 @@ int32_t ctgGetTbCfg(SCatalog* pCtg, SRequestConnInfo *pConn, SName* pTableName, CTG_ERR_RET(ctgGetTableCfgFromMnode(pCtg, pConn, pTableName, pCfg, NULL)); } else { SVgroupInfo vgroupInfo = {0}; - CTG_ERR_RET(catalogGetTableHashVgroup(pCtg, pConn, pTableName, &vgroupInfo)); + CTG_ERR_RET(ctgGetTbHashVgroup(pCtg, pConn, pTableName, &vgroupInfo)); CTG_ERR_RET(ctgGetTableCfgFromVnode(pCtg, pConn, pTableName, &vgroupInfo, pCfg, NULL)); } @@ -477,6 +480,57 @@ _return: CTG_RET(code); } + +int32_t ctgGetTbHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) { + if (IS_SYS_DBNAME(pTableName->dbname)) { + ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname); + CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); + } + + SCtgDBCache* dbCache = NULL; + int32_t code = 0; + char db[TSDB_DB_FNAME_LEN] = {0}; + tNameGetFullDbName(pTableName, db); + + SDBVgInfo *vgInfo = NULL; + CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo)); + + CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? 
vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup)); + +_return: + + if (dbCache) { + ctgRUnlockVgInfo(dbCache); + ctgReleaseDBCache(pCtg, dbCache); + } + + if (vgInfo) { + taosHashCleanup(vgInfo->vgHash); + taosMemoryFreeClear(vgInfo); + } + + CTG_RET(code); +} + +int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName) { + int32_t code = 0; + + if (NULL == pCtg || NULL == pTableName) { + CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT); + } + + if (NULL == pCtg->dbCache) { + return TSDB_CODE_SUCCESS; + } + + CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true)); + +_return: + + CTG_RET(code); +} + + int32_t catalogInit(SCatalogCfg* cfg) { if (gCtgMgmt.pCluster) { qError("catalog already initialized"); @@ -772,21 +826,7 @@ _return: int32_t catalogRemoveTableMeta(SCatalog* pCtg, SName* pTableName) { CTG_API_ENTER(); - int32_t code = 0; - - if (NULL == pCtg || NULL == pTableName) { - CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); - } - - if (NULL == pCtg->dbCache) { - CTG_API_LEAVE(TSDB_CODE_SUCCESS); - } - - CTG_ERR_JRET(ctgRemoveTbMetaFromCache(pCtg, pTableName, true)); - -_return: - - CTG_API_LEAVE(code); + CTG_API_LEAVE(ctgRemoveTbMeta(pCtg, pTableName)); } int32_t catalogRemoveStbMeta(SCatalog* pCtg, const char* dbFName, uint64_t dbId, const char* stbName, uint64_t suid) { @@ -878,12 +918,12 @@ int32_t catalogChkTbMetaVersion(SCatalog* pCtg, SRequestConnInfo *pConn, SArray* case TSDB_CHILD_TABLE: { SName stb = name; strcpy(stb.tname, stbName); - catalogRemoveTableMeta(pCtg, &stb); + ctgRemoveTbMeta(pCtg, &stb); break; } case TSDB_SUPER_TABLE: case TSDB_NORMAL_TABLE: - catalogRemoveTableMeta(pCtg, &name); + ctgRemoveTbMeta(pCtg, &name); break; default: ctgError("ignore table type %d", tbType); @@ -947,34 +987,7 @@ int32_t catalogGetTableDistVgInfo(SCatalog* pCtg, SRequestConnInfo *pConn, const int32_t catalogGetTableHashVgroup(SCatalog *pCtg, SRequestConnInfo *pConn, const SName *pTableName, SVgroupInfo *pVgroup) { CTG_API_ENTER(); - if (IS_SYS_DBNAME(pTableName->dbname)) { - ctgError("no valid vgInfo for db, dbname:%s", pTableName->dbname); - CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT); - } - - SCtgDBCache* dbCache = NULL; - int32_t code = 0; - char db[TSDB_DB_FNAME_LEN] = {0}; - tNameGetFullDbName(pTableName, db); - - SDBVgInfo *vgInfo = NULL; - CTG_ERR_JRET(ctgGetDBVgInfo(pCtg, pConn, db, &dbCache, &vgInfo)); - - CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, vgInfo ? 
vgInfo : dbCache->vgCache.vgInfo, pTableName, pVgroup)); - -_return: - - if (dbCache) { - ctgRUnlockVgInfo(dbCache); - ctgReleaseDBCache(pCtg, dbCache); - } - - if (vgInfo) { - taosHashCleanup(vgInfo->vgHash); - taosMemoryFreeClear(vgInfo); - } - - CTG_API_LEAVE(code); + CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup)); } int32_t catalogGetAllMeta(SCatalog* pCtg, SRequestConnInfo *pConn, const SCatalogReq* pReq, SMetaData* pRsp) { @@ -1200,7 +1213,7 @@ int32_t catalogRefreshGetTableCfg(SCatalog* pCtg, SRequestConnInfo *pConn, const } int32_t code = 0; - CTG_ERR_JRET(catalogRemoveTableMeta(pCtg, (SName*)pTableName)); + CTG_ERR_JRET(ctgRemoveTbMeta(pCtg, (SName*)pTableName)); CTG_ERR_JRET(ctgGetTbCfg(pCtg, pConn, (SName*)pTableName, pCfg)); diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index f4cee13ec0e6f8ad67cf01e557bd7bcce3aa1f91..0184ac3a607a3dbf6c0ddec6cd9c8ea2df3e3781 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -398,7 +398,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con SName* name = taosHashIterate(pTb, NULL); while (name) { - catalogRemoveTableMeta(pCtg, name); + ctgRemoveTbMeta(pCtg, name); name = taosHashIterate(pTb, name); } diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c index e61becbe17f6250702cda1ced553a48b5190daab..bdf60b110b082ffef530b76dbc033ac2fa272506 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -30,12 +30,20 @@ void ctgFreeMsgSendParam(void* param) { taosMemoryFree(param); } +void ctgFreeBatchMsg(void* msg) { + if (NULL == msg) { + return; + } + SBatchMsg* pMsg = (SBatchMsg*)msg; + taosMemoryFree(pMsg->msg); +} + void ctgFreeBatch(SCtgBatch *pBatch) { if (NULL == pBatch) { return; } - taosArrayDestroy(pBatch->pMsgs); + taosArrayDestroyEx(pBatch->pMsgs, ctgFreeBatchMsg); taosArrayDestroy(pBatch->pTaskIds); } diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 104c007756bce6e34d28d91bdc07c88a2ab06f1d..2ae4666ff6c20580c63034ad368e5d038a1ccd96 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -63,6 +63,8 @@ extern "C" { #define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms" //append area +#define EXPLAIN_LIMIT_FORMAT "limit=%" PRId64 +#define EXPLAIN_SLIMIT_FORMAT "slimit=%" PRId64 #define EXPLAIN_LEFT_PARENTHESIS_FORMAT " (" #define EXPLAIN_RIGHT_PARENTHESIS_FORMAT ")" #define EXPLAIN_BLANK_FORMAT " " @@ -81,6 +83,8 @@ extern "C" { #define EXPLAIN_STRING_TYPE_FORMAT "%s" #define EXPLAIN_INPUT_ORDER_FORMAT "input_order=%s" #define EXPLAIN_OUTPUT_ORDER_TYPE_FORMAT "output_order=%s" +#define EXPLAIN_OFFSET_FORMAT "offset=%d" +#define EXPLAIN_SOFFSET_FORMAT "soffset=%d" #define COMMAND_RESET_LOG "resetLog" #define COMMAND_SCHEDULE_POLICY "schedulePolicy" @@ -147,6 +151,21 @@ typedef struct SExplainCtx { #define EXPLAIN_SUM_ROW_NEW(...) tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE - VARSTR_HEADER_SIZE, __VA_ARGS__) #define EXPLAIN_SUM_ROW_END() do { varDataSetLen(tbuf, tlen); tlen += VARSTR_HEADER_SIZE; } while (0) +#define EXPLAIN_ROW_APPEND_LIMIT_IMPL(_pLimit, sl) do { \ + if (_pLimit) { \ + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); \ + SLimitNode* pLimit = (SLimitNode*)_pLimit; \ + EXPLAIN_ROW_APPEND(((sl) ? 
EXPLAIN_SLIMIT_FORMAT : EXPLAIN_LIMIT_FORMAT), pLimit->limit); \ + if (pLimit->offset) { \ + EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); \ + EXPLAIN_ROW_APPEND(((sl) ? EXPLAIN_SOFFSET_FORMAT : EXPLAIN_OFFSET_FORMAT), pLimit->offset);\ + } \ + } \ +} while (0) + +#define EXPLAIN_ROW_APPEND_LIMIT(_pLimit) EXPLAIN_ROW_APPEND_LIMIT_IMPL(_pLimit, false) +#define EXPLAIN_ROW_APPEND_SLIMIT(_pLimit) EXPLAIN_ROW_APPEND_LIMIT_IMPL(_pLimit, true) + #ifdef __cplusplus } #endif diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 7c95e71823ca227420adfcc9054cf15cf838a89e..a76b4574229c06b24d0da641dbe7bca67e2dbe2e 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -517,6 +517,13 @@ static int32_t execAlterLocal(SAlterLocalStmt* pStmt) { goto _return; } + bool forbidden = false; + taosLocalCfgForbiddenToChange(pStmt->config, &forbidden); + if (forbidden) { + terrno = TSDB_CODE_OPS_NOT_SUPPORT; + return terrno; + } + if (cfgSetItem(tsCfg, pStmt->config, pStmt->value, CFG_STYPE_ALTER_CMD)) { return terrno; } diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 66a94f7e2832a5ddd961baf6e69aef25e2efcc03..c080b666cc5ac2e6dc4628e4ccf916ffc7735e47 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -417,6 +417,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pTagScanNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pTagScanNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pTagScanNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -523,6 +525,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pTblScanNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pTblScanNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pTblScanNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pTblScanNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -567,6 +571,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pSTblScanNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSTblScanNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pSTblScanNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pSTblScanNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -576,7 +582,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -601,6 +607,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pPrjNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPrjNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pPrjNode->node.pLimit); + 
EXPLAIN_ROW_APPEND_SLIMIT(pPrjNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -610,7 +618,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -635,6 +643,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pJoinNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pJoinNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pJoinNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pJoinNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -650,7 +660,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i QRY_ERR_RET( nodesNodeToSQL(pJoinNode->pOnConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); - QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); } break; } @@ -681,6 +691,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pAggNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pAggNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pAggNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -690,7 +702,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -717,6 +729,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pIndefNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIndefNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pIndefNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pIndefNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -726,7 +740,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -755,6 +769,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pExchNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pExchNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pExchNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pExchNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -764,7 +780,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } QRY_ERR_RET(qExplainAppendGroupResRows(ctx, pExchNode->srcGroupId, 
level + 1)); @@ -826,6 +842,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pSortNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSortNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pSortNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pSortNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -835,7 +853,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -864,6 +882,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pIntNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pIntNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pIntNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); uint8_t precision = getIntervalPrecision(pIntNode); @@ -881,7 +901,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -906,6 +926,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pIntNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pIntNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pIntNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); uint8_t precision = getIntervalPrecision(pIntNode); @@ -923,7 +945,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -948,6 +970,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pFillNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pFillNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pFillNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pFillNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); if (pFillNode->pValues) { @@ -980,7 +1004,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1005,6 +1029,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pSessNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSessNode->window.node.pOutputDataBlockDesc->outputRowSize); + 
EXPLAIN_ROW_APPEND_LIMIT(pSessNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pSessNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1018,7 +1044,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1046,6 +1072,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pStateNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pStateNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pStateNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pStateNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1058,7 +1086,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1084,6 +1112,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pPartNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pPartNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pPartNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pPartNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1093,7 +1123,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1153,6 +1183,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pMergeNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pMergeNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pMergeNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pMergeNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1172,7 +1204,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1202,6 +1234,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pDistScanNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pDistScanNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pDistScanNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pDistScanNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1211,7 +1245,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1241,6 +1275,8 @@ int32_t 
qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pLastRowNode->scan.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pLastRowNode->scan.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pLastRowNode->scan.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pLastRowNode->scan.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1250,7 +1286,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1310,6 +1346,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pSortNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pSortNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pSortNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pSortNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); @@ -1319,7 +1357,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1342,6 +1380,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pIntNode->window.node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pIntNode->window.node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pIntNode->window.node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); uint8_t precision = getIntervalPrecision(pIntNode); @@ -1359,7 +1399,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } @@ -1389,6 +1429,8 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i nodesGetOutputNumFromSlotList(pInterpNode->node.pOutputDataBlockDesc->pSlots)); EXPLAIN_ROW_APPEND(EXPLAIN_BLANK_FORMAT); EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pInterpNode->node.pOutputDataBlockDesc->outputRowSize); + EXPLAIN_ROW_APPEND_LIMIT(pInterpNode->node.pLimit); + EXPLAIN_ROW_APPEND_SLIMIT(pInterpNode->node.pSlimit); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); if (pInterpNode->pFillValues) { @@ -1424,7 +1466,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen)); EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); - } + } } break; } diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index d587e201ef5f2a20f0e8019a54492477dd118f76..577f9772be1223ba29bc087872f6f5e2bea9f57f 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -437,6 +437,7 @@ typedef struct SessionWindowSupporter { SStreamAggSupporter* pStreamAggSup; int64_t gap; uint8_t parentType; + SAggSupporter* 
pIntervalAggSup; } SessionWindowSupporter; typedef struct STimeWindowSupp { @@ -1009,6 +1010,7 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, int32_t rows, int32_t start, int64_t gap, SHashObj* pStDeleted); bool functionNeedToExecute(SqlFunctionCtx* pCtx); bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup); +bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup); void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid); void printDataBlock(SSDataBlock* pBlock, const char* flag); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 8f3ecafbea1e9574d718a521c190c811cff02419..775017b8dd4c1257a92eec8c100a400d00e7f2f0 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -48,7 +48,6 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu pOperator->status = OP_NOT_OPENED; SStreamScanInfo* pInfo = pOperator->info; - /*pInfo->assignBlockUid = assignUid;*/ // TODO: if a block was set but not consumed, // prevent setting a different type of block diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index d031494057a659e2ff7dca0b0877f7365563d3ea..e52cbf40a9470bf9a8ee6e725a94240e9ba63493 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -672,6 +672,10 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc numOfRows = pfCtx->fpSet.process(pfCtx); } else if (fmIsAggFunc(pfCtx->functionId)) { + // selective value output should be set during corresponding function execution + if (fmIsSelectValueFunc(pfCtx->functionId)) { + continue; + } // _group_key function for "partition by tbname" + csum(col_name) query SColumnInfoData* pOutput = taosArrayGet(pResult->pDataBlock, outputSlotId); int32_t slotId = pfCtx->param[0].pCol->slotId; diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index 17b81cdb827fe11b5de3e27233fd0add211aa32d..4134ce5dbfdd8a661082b2887fdbef5972e86341 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -323,8 +323,6 @@ static void doMergeJoinImpl(struct SOperatorInfo* pOperator, SSDataBlock* pRes) } if (leftTs == rightTs) { - mergeJoinJoinLeftRight(pOperator, pRes, nrows, pJoinInfo->pLeft, pJoinInfo->leftPos, pJoinInfo->pRight, - pJoinInfo->rightPos); mergeJoinJoinDownstreamTsRanges(pOperator, leftTs, pRes, &nrows); } else if (asc && leftTs < rightTs || !asc && leftTs > rightTs) { pJoinInfo->leftPos += 1; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index b8257a49f92154254df9900e5702dc4bc49c4172..99fe8db5d94540de80d405b05d87ed0568817353 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -483,10 +483,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) { pBlock->info.groupId = *groupId; } - if (pTableScanInfo->assignBlockUid) { - pBlock->info.groupId = pBlock->info.uid; - } - pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows; pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0; @@ -1137,7 +1133,8 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[rowId], &pInfo->interval, TSDB_ORDER_ASC); // must check 
update info first. bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]); - if ((update || (isSignleIntervalWindow(pInfo) && isCloseWindow(&win, &pInfo->twAggSup))) && out) { + if ((update || (isSignleIntervalWindow(pInfo) && isCloseWindow(&win, &pInfo->twAggSup) && + isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup))) && out) { appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid); } } @@ -1176,12 +1173,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock pInfo->pRes->info.groupId = 0; } - // for generating rollup SMA result, each time is an independent time serie. - // TODO temporarily used, when the statement of "partition by tbname" is ready, remove this - if (pInfo->assignBlockUid) { - pInfo->pRes->info.groupId = pBlock->info.uid; - } - // todo extract method for (int32_t i = 0; i < taosArrayGetSize(pInfo->pColMatchInfo); ++i) { SColMatchInfo* pColMatchInfo = taosArrayGet(pInfo->pColMatchInfo, i); @@ -1349,6 +1340,9 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { case STREAM_SCAN_FROM_DATAREADER_RETRIEVE: { SSDataBlock* pSDB = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex); if (pSDB) { + STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info; + uint64_t version = getReaderMaxVersion(pTableScanInfo->dataReader); + updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId,version); pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA; checkUpdateData(pInfo, true, pSDB, false); return pSDB; @@ -1402,6 +1396,12 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { setBlockIntoRes(pInfo, &block); + if (updateInfoIgnore(pInfo->pUpdateInfo, &pInfo->pRes->info.window, pInfo->pRes->info.groupId, pInfo->pRes->info.version)) { + printDataBlock(pInfo->pRes, "stream scan ignore"); + blockDataCleanup(pInfo->pRes); + continue; + } + if (pBlockInfo->rows > 0) { break; } @@ -1418,6 +1418,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { // record the scan action. 
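/* [editor's sketch, not part of the patch] The isDeletedWindow() call added in
 * checkUpdateData() above consults the interval operator's SAggSupporter, which
 * this patch threads into the stream scanner via sessionSup.pIntervalAggSup
 * (see the executorimpl.h and initIntervalDownStream() hunks): for a single
 * interval stream, late data only needs to be re-flagged as an update when the
 * closed window's result row has already been removed from the operator's hash
 * table. A minimal implementation matching the declared signature could look
 * like the following; keyBuf, SET_RES_WINDOW_KEY and GET_RES_WINDOW_KEY_LEN are
 * assumed to be the existing result-row keying helpers from executorimpl.h:
 *
 *   bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup) {
 *     // build the same [groupId | window-start-ts] key used when interval
 *     // results are stored in pResultRowHashTable
 *     SET_RES_WINDOW_KEY(pSup->keyBuf, &pWin->skey, sizeof(int64_t), groupId);
 *     // no surviving entry for this key => the window's result was deleted
 *     return taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
 *                        GET_RES_WINDOW_KEY_LEN(sizeof(int64_t))) == NULL;
 *   }
 */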
pInfo->numOfExec++; pOperator->resultInfo.totalRows += pBlockInfo->rows; + printDataBlock(pInfo->pRes, "stream scan"); if (pBlockInfo->rows == 0) { updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 63143875a39324c6997940ebf33aabe9560b5f34..802e1f2306776165ccd1a45fd389adc6b5307ead 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1456,6 +1456,7 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SArray* resWins) { static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval, SHashObj* pPullDataMap, SArray* closeWins, SArray* pRecyPages, SDiskbasedBuf* pDiscBuf) { + qDebug("===stream===close interval window"); void* pIte = NULL; size_t keyLen = 0; while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) { @@ -1772,10 +1773,11 @@ SSDataBlock* createDeleteBlock() { return pBlock; } -void initIntervalDownStream(SOperatorInfo* downstream, uint8_t type) { +void initIntervalDownStream(SOperatorInfo* downstream, uint8_t type, SAggSupporter* pSup) { ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN); SStreamScanInfo* pScanInfo = downstream->info; pScanInfo->sessionSup.parentType = type; + pScanInfo->sessionSup.pIntervalAggSup = pSup; } SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, @@ -1851,7 +1853,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); if (nodeType(pPhyNode) == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL) { - initIntervalDownStream(downstream, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL); + initIntervalDownStream(downstream, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, &pInfo->aggSup); } code = appendDownstream(pOperator, &downstream, 1); @@ -3111,7 +3113,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL); if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL) { - initIntervalDownStream(downstream, pPhyNode->type); + initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup); } code = appendDownstream(pOperator, &downstream, 1); if (code != TSDB_CODE_SUCCESS) { diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index aee1ef5c3cdb999decbd8cfbc611c73ab6f6c26a..6383179fee4dc7145c2c8d854523bfc584c7a49f 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -2236,7 +2236,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "interp", .type = FUNCTION_TYPE_INTERP, - .classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_INTERVAL_INTERPO_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateFirstLast, .getEnvFunc = getSelectivityFuncEnv, .initFunc = functionSetup, @@ -2246,7 +2246,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "derivative", .type = FUNCTION_TYPE_DERIVATIVE, - .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_CUMULATIVE_FUNC, + .classification = 
FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_CUMULATIVE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateDerivative, .getEnvFunc = getDerivativeFuncEnv, .initFunc = derivativeFuncSetup, @@ -2280,7 +2280,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "_cache_last_row", .type = FUNCTION_TYPE_CACHE_LAST_ROW, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_MULTI_RES_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateFirstLast, .getEnvFunc = getFirstLastFuncEnv, .initFunc = functionSetup, @@ -2375,7 +2375,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "histogram", .type = FUNCTION_TYPE_HISTOGRAM, - .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_MULTI_ROWS_FUNC | FUNC_MGT_FORBID_FILL_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateHistogram, .getEnvFunc = getHistogramFuncEnv, .initFunc = histogramFunctionSetup, @@ -2414,7 +2414,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "hyperloglog", .type = FUNCTION_TYPE_HYPERLOGLOG, - .classification = FUNC_MGT_AGG_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateHLL, .getEnvFunc = getHLLFuncEnv, .initFunc = functionSetup, @@ -2428,7 +2428,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { }, { .name = "_hyperloglog_partial", .type = FUNCTION_TYPE_HYPERLOGLOG_PARTIAL, - .classification = FUNC_MGT_AGG_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateHLLPartial, .getEnvFunc = getHLLFuncEnv, @@ -2440,7 +2440,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { }, { .name = "_hyperloglog_merge", .type = FUNCTION_TYPE_HYPERLOGLOG_MERGE, - .classification = FUNC_MGT_AGG_FUNC, + .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_TIMELINE_FUNC, .translateFunc = translateHLLMerge, .getEnvFunc = getHLLFuncEnv, @@ -2460,12 +2460,12 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { .processFunc = diffFunction, .sprocessFunc = diffScalarFunction, .finalizeFunc = functionFinalize, - .estimateReturnRowsFunc = diffEstReturnRows + .estimateReturnRowsFunc = diffEstReturnRows, }, { .name = "statecount", .type = FUNCTION_TYPE_STATE_COUNT, - .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateStateCount, .getEnvFunc = getStateFuncEnv, .initFunc = functionSetup, @@ -2476,7 +2476,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "stateduration", .type = FUNCTION_TYPE_STATE_DURATION, - .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateStateDuration, .getEnvFunc = getStateFuncEnv, .initFunc = functionSetup, @@ -2487,7 +2487,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "csum", .type =
FUNCTION_TYPE_CSUM, - .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_CUMULATIVE_FUNC, .translateFunc = translateCsum, .getEnvFunc = getCsumFuncEnv, .initFunc = functionSetup, @@ -2499,7 +2499,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "mavg", .type = FUNCTION_TYPE_MAVG, - .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, + .classification = FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_SELECT_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC, .translateFunc = translateMavg, .getEnvFunc = getMavgFuncEnv, .initFunc = mavgFunctionSetup, @@ -2521,8 +2521,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = { { .name = "tail", .type = FUNCTION_TYPE_TAIL, - .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | FUNC_MGT_FORBID_STREAM_FUNC | - FUNC_MGT_IMPLICIT_TS_FUNC, + .classification = FUNC_MGT_SELECT_FUNC | FUNC_MGT_INDEFINITE_ROWS_FUNC | FUNC_MGT_TIMELINE_FUNC | + FUNC_MGT_FORBID_STREAM_FUNC | FUNC_MGT_IMPLICIT_TS_FUNC, .translateFunc = translateTail, .getEnvFunc = getTailFuncEnv, .initFunc = tailFunctionSetup, diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index 0767c2e5a2aafa3490a5f914e0cb3d689aad2157..b5cbaa1796a9fd6de21b5554cfac8fae6bb25947 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -2662,19 +2662,11 @@ int32_t apercentilePartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { char* res = taosMemoryCalloc(resultBytes + VARSTR_HEADER_SIZE, sizeof(char)); if (pInfo->algo == APERCT_ALGO_TDIGEST) { - if (pInfo->pTDigest->size > 0) { - memcpy(varDataVal(res), pInfo, resultBytes); - varDataSetLen(res, resultBytes); - } else { - return TSDB_CODE_SUCCESS; - } + memcpy(varDataVal(res), pInfo, resultBytes); + varDataSetLen(res, resultBytes); } else { - if (pInfo->pHisto->numOfElems > 0) { - memcpy(varDataVal(res), pInfo, resultBytes); - varDataSetLen(res, resultBytes); - } else { - return TSDB_CODE_SUCCESS; - } + memcpy(varDataVal(res), pInfo, resultBytes); + varDataSetLen(res, resultBytes); } int32_t slotId = pCtx->pExpr->base.resSchema.slotId; @@ -4651,10 +4643,15 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (colDataIsNull_f(pInputCol->nullbitmap, i)) { colDataAppendNULL(pOutput, i); + // handle selectivity + if (pCtx->subsidiaries.num > 0) { + appendSelectivityValue(pCtx, i, i); + } continue; } - bool ret = checkStateOp(op, pInputCol, i, pCtx->param[2].param); + bool ret = checkStateOp(op, pInputCol, i, pCtx->param[2].param); + int64_t output = -1; if (ret) { output = ++pInfo->count; @@ -4662,6 +4659,11 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) { pInfo->count = 0; } colDataAppend(pOutput, i, (char*)&output, false); + + // handle selectivity + if (pCtx->subsidiaries.num > 0) { + appendSelectivityValue(pCtx, i, i); + } } return numOfElems; @@ -4694,6 +4696,10 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) { numOfElems++; if (colDataIsNull_f(pInputCol->nullbitmap, i)) { colDataAppendNULL(pOutput, i); + // handle selectivity + if (pCtx->subsidiaries.num > 0) { + appendSelectivityValue(pCtx, i, i); + } continue; } @@ -4710,6 +4716,11 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) { 
pInfo->durationStart = 0; } colDataAppend(pOutput, i, (char*)&output, false); + + // handle selectivity + if (pCtx->subsidiaries.num > 0) { + appendSelectivityValue(pCtx, i, i); + } } return numOfElems; @@ -4762,6 +4773,11 @@ int32_t csumFunction(SqlFunctionCtx* pCtx) { } } + // handle selectivity + if (pCtx->subsidiaries.num > 0) { + appendSelectivityValue(pCtx, i, pos); + } + numOfElems++; } @@ -4834,6 +4850,11 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx) { colDataAppend(pOutput, pos, (char*)&result, false); } + // handle selectivity + if (pCtx->subsidiaries.num > 0) { + appendSelectivityValue(pCtx, i, pos); + } + numOfElems++; } diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 5fc4e7882cca2d54235f126ba7972329e9e766f1..339ff3808b06e114fe282997c4ea9c86ff1e1f10 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -221,6 +221,13 @@ bool fmIsLastRowFunc(int32_t funcId) { return FUNCTION_TYPE_LAST_ROW == funcMgtBuiltins[funcId].type; } +bool fmIsSelectValueFunc(int32_t funcId) { + if (funcId < 0 || funcId >= funcMgtBuiltinsNum) { + return false; + } + return FUNCTION_TYPE_SELECT_VALUE == funcMgtBuiltins[funcId].type; +} + void fmFuncMgtDestroy() { void* m = gFunMgtService.pFuncNameHashTable; if (m != NULL && atomic_val_compare_exchange_ptr((void**)&gFunMgtService.pFuncNameHashTable, m, 0) == m) { diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index e1c8ac0204f77f6e4756cd05fba36141b911290f..6dfdbf684094419319d403bbf046a74837d928b1 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -385,6 +385,37 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP .reverse = reverse, .filterFunc = filterFunc}; + char buf[128] = {0}; + float f = 0.0; + double d = 0.0; + if (IS_VAR_DATA_TYPE(left->colValType)) { + if (!IS_VAR_DATA_TYPE(right->colValType)) { + NUM_TO_STRING(right->colValType, right->condValue, sizeof(buf) - 2, buf + VARSTR_HEADER_SIZE); + varDataSetLen(buf, strlen(buf + VARSTR_HEADER_SIZE)); + param.val = buf; + } + } else { + if (left->colValType == TSDB_DATA_TYPE_FLOAT) { + if (right->colValType == TSDB_DATA_TYPE_DOUBLE) { + f = GET_DOUBLE_VAL(right->condValue); + param.val = &f; + } else if (right->colValType == TSDB_DATA_TYPE_BIGINT) { + f = *(int64_t *)(right->condValue); + param.val = &f; + } else { + f = *(int32_t *)(right->condValue); + param.val = &f; + } + } else if (left->colValType == TSDB_DATA_TYPE_DOUBLE) { + if (right->colValType == TSDB_DATA_TYPE_DOUBLE) { + d = GET_DOUBLE_VAL(right->condValue); + param.val = &d; + } else if (right->colValType == TSDB_DATA_TYPE_BIGINT) { + d = *(int64_t *)(right->condValue); + param.val = &d; + } + } + } ret = metaFilterTableIds(arg->metaEx, ¶m, output->result); } return ret; diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index b0c16f26edd5438e3b05446b13ef06dfa3808b20..9d15b01acf0d1301fafc44ea58acb74c36892bd7 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -2684,6 +2684,7 @@ static int32_t jsonToDataType(const SJson* pJson, void* pObj) { static const char* jkExprDataType = "DataType"; static const char* jkExprAliasName = "AliasName"; +static const char* jkExprUserAlias = "UserAlias"; static int32_t exprNodeToJson(const void* pObj, SJson* pJson) { const SExprNode* pNode = (const SExprNode*)pObj; @@ -2692,6 +2693,9 @@ static int32_t 
exprNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddStringToObject(pJson, jkExprAliasName, pNode->aliasName); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddStringToObject(pJson, jkExprUserAlias, pNode->userAlias); + } return code; } @@ -2703,6 +2707,9 @@ static int32_t jsonToExprNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetStringValue(pJson, jkExprAliasName, pNode->aliasName); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetStringValue(pJson, jkExprUserAlias, pNode->userAlias); + } return code; } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 3c6fbe409c6be5d40b0686376e4a0ec87c9e275e..abab2126c0d7aa5267254df7e708b819f8d35724 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -745,6 +745,7 @@ void nodesDestroyNode(SNode* pNode) { } taosArrayDestroy(pQuery->pDbList); taosArrayDestroy(pQuery->pTableList); + taosArrayDestroy(pQuery->pTargetTableList); taosArrayDestroy(pQuery->pPlaceholderValues); nodesDestroyNode(pQuery->pPrepareRoot); break; @@ -1502,7 +1503,7 @@ static EDealRes collectColumns(SNode* pNode, void* pContext) { SCollectColumnsCxt* pCxt = (SCollectColumnsCxt*)pContext; if (QUERY_NODE_COLUMN == nodeType(pNode)) { SColumnNode* pCol = (SColumnNode*)pNode; - if (isCollectType(pCxt->collectType, pCol->colType) && + if (isCollectType(pCxt->collectType, pCol->colType) && 0 != strcmp(pCol->colName, "*") && (NULL == pCxt->pTableAlias || 0 == strcmp(pCxt->pTableAlias, pCol->tableAlias))) { return doCollect(pCxt, pCol, pNode); } @@ -1816,187 +1817,3 @@ int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc) { return TSDB_CODE_SUCCESS; } - -typedef struct SClassifyConditionCxt { - bool hasPrimaryKey; - bool hasTagIndexCol; - bool hasTagCol; - bool hasOtherCol; -} SClassifyConditionCxt; - -static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) { - SClassifyConditionCxt* pCxt = (SClassifyConditionCxt*)pContext; - if (QUERY_NODE_COLUMN == nodeType(pNode)) { - SColumnNode* pCol = (SColumnNode*)pNode; - if (PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && TSDB_SYSTEM_TABLE != pCol->tableType) { - pCxt->hasPrimaryKey = true; - } else if (pCol->hasIndex) { - pCxt->hasTagIndexCol = true; - pCxt->hasTagCol = true; - } else if (COLUMN_TYPE_TAG == pCol->colType || COLUMN_TYPE_TBNAME == pCol->colType) { - pCxt->hasTagCol = true; - } else { - pCxt->hasOtherCol = true; - } - } - return DEAL_RES_CONTINUE; -} - -typedef enum EConditionType { - COND_TYPE_PRIMARY_KEY = 1, - COND_TYPE_TAG_INDEX, - COND_TYPE_TAG, - COND_TYPE_NORMAL -} EConditionType; - -static EConditionType classifyCondition(SNode* pNode) { - SClassifyConditionCxt cxt = {.hasPrimaryKey = false, .hasTagIndexCol = false, .hasOtherCol = false}; - nodesWalkExpr(pNode, classifyConditionImpl, &cxt); - return cxt.hasOtherCol ? COND_TYPE_NORMAL - : (cxt.hasPrimaryKey && cxt.hasTagCol - ? COND_TYPE_NORMAL - : (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY - : (cxt.hasTagIndexCol ? 
COND_TYPE_TAG_INDEX : COND_TYPE_TAG))); -} - -static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, - SNode** pOtherCond) { - SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*pCondition); - - int32_t code = TSDB_CODE_SUCCESS; - - SNodeList* pPrimaryKeyConds = NULL; - SNodeList* pTagIndexConds = NULL; - SNodeList* pTagConds = NULL; - SNodeList* pOtherConds = NULL; - SNode* pCond = NULL; - FOREACH(pCond, pLogicCond->pParameterList) { - switch (classifyCondition(pCond)) { - case COND_TYPE_PRIMARY_KEY: - if (NULL != pPrimaryKeyCond) { - code = nodesListMakeAppend(&pPrimaryKeyConds, nodesCloneNode(pCond)); - } - break; - case COND_TYPE_TAG_INDEX: - if (NULL != pTagIndexCond) { - code = nodesListMakeAppend(&pTagIndexConds, nodesCloneNode(pCond)); - } - if (NULL != pTagCond) { - code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); - } - break; - case COND_TYPE_TAG: - if (NULL != pTagCond) { - code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); - } - break; - case COND_TYPE_NORMAL: - default: - if (NULL != pOtherCond) { - code = nodesListMakeAppend(&pOtherConds, nodesCloneNode(pCond)); - } - break; - } - if (TSDB_CODE_SUCCESS != code) { - break; - } - } - - SNode* pTempPrimaryKeyCond = NULL; - SNode* pTempTagIndexCond = NULL; - SNode* pTempTagCond = NULL; - SNode* pTempOtherCond = NULL; - if (TSDB_CODE_SUCCESS == code) { - code = nodesMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodesMergeConds(&pTempTagIndexCond, &pTagIndexConds); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodesMergeConds(&pTempTagCond, &pTagConds); - } - if (TSDB_CODE_SUCCESS == code) { - code = nodesMergeConds(&pTempOtherCond, &pOtherConds); - } - - if (TSDB_CODE_SUCCESS == code) { - if (NULL != pPrimaryKeyCond) { - *pPrimaryKeyCond = pTempPrimaryKeyCond; - } - if (NULL != pTagIndexCond) { - *pTagIndexCond = pTempTagIndexCond; - } - if (NULL != pTagCond) { - *pTagCond = pTempTagCond; - } - if (NULL != pOtherCond) { - *pOtherCond = pTempOtherCond; - } - nodesDestroyNode(*pCondition); - *pCondition = NULL; - } else { - nodesDestroyList(pPrimaryKeyConds); - nodesDestroyList(pTagIndexConds); - nodesDestroyList(pTagConds); - nodesDestroyList(pOtherConds); - nodesDestroyNode(pTempPrimaryKeyCond); - nodesDestroyNode(pTempTagIndexCond); - nodesDestroyNode(pTempTagCond); - nodesDestroyNode(pTempOtherCond); - } - - return code; -} - -int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, - SNode** pOtherCond) { - if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCondition) && - LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCondition)->condType) { - return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagIndexCond, pTagCond, pOtherCond); - } - - bool needOutput = false; - switch (classifyCondition(*pCondition)) { - case COND_TYPE_PRIMARY_KEY: - if (NULL != pPrimaryKeyCond) { - *pPrimaryKeyCond = *pCondition; - needOutput = true; - } - break; - case COND_TYPE_TAG_INDEX: - if (NULL != pTagIndexCond) { - *pTagIndexCond = *pCondition; - needOutput = true; - } - if (NULL != pTagCond) { - SNode* pTempCond = *pCondition; - if (NULL != pTagIndexCond) { - pTempCond = nodesCloneNode(*pCondition); - if (NULL == pTempCond) { - return TSDB_CODE_OUT_OF_MEMORY; - } - } - *pTagCond = pTempCond; - needOutput = true; - } - break; - case COND_TYPE_TAG: - if (NULL != pTagCond) { - *pTagCond = *pCondition; - needOutput = true; - } - break; - case 
COND_TYPE_NORMAL: - default: - if (NULL != pOtherCond) { - *pOtherCond = *pCondition; - needOutput = true; - } - break; - } - if (needOutput) { - *pCondition = NULL; - } - - return TSDB_CODE_SUCCESS; -} diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 705ba9d339c83e397bffce7da967b7c3feec93ae..d0c994e210e21ca5f4fdd79752e17d795240e03c 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -177,7 +177,7 @@ SNode* createCreateDnodeStmt(SAstCreateContext* pCxt, const SToken* pFqdn, const SNode* createDropDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode); SNode* createAlterDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, const SToken* pConfig, const SToken* pValue); SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, bool ignoreExists, SToken* pIndexName, - SToken* pTableName, SNodeList* pCols, SNode* pOptions); + SNode* pRealTable, SNodeList* pCols, SNode* pOptions); SNode* createIndexOption(SAstCreateContext* pCxt, SNodeList* pFuncs, SNode* pInterval, SNode* pOffset, SNode* pSliding, SNode* pStreamOptions); SNode* createDropIndexStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pIndexName); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index f625ebd388ba5d57f8b284ac8c25ed9ab475cbb3..21cb310f1077f2b796f794d0864f280fd49e3080 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -33,6 +33,8 @@ } else { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INCOMPLETE_SQL); } + } else if (TSDB_CODE_PAR_DB_NOT_SPECIFIED == pCxt->errCode && TK_NK_FLOAT == TOKEN.type) { + pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, TOKEN.z); } } @@ -422,9 +424,7 @@ from_db_opt(A) ::= FROM db_name(B). /************************************************ create index ********************************************************/ cmd ::= CREATE SMA INDEX not_exists_opt(D) - index_name(A) ON table_name(B) index_options(C). { pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, D, &A, &B, NULL, C); } -//cmd ::= CREATE FULLTEXT INDEX not_exists_opt(D) -// index_name(A) ON table_name(B) NK_LP col_name_list(C) NK_RP. { pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_FULLTEXT, D, &A, &B, C, NULL); } + index_name(A) ON full_table_name(B) index_options(C). { pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, D, &A, B, NULL, C); } cmd ::= DROP INDEX exists_opt(B) index_name(A). 
{ pCxt->pRootNode = createDropIndexStmt(pCxt, B, &A); } index_options(A) ::= FUNCTION NK_LP func_list(B) NK_RP INTERVAL diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index ff2157fe675f84c3d646b402a8de62978b0f9f02..41c614eba8eded43928b3f35ba60859016d6c0ab 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -233,18 +233,22 @@ SNode* createRawExprNodeExt(SAstCreateContext* pCxt, const SToken* pStart, const SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) { CHECK_PARSER_STATUS(pCxt); SRawExprNode* pRawExpr = (SRawExprNode*)pNode; - SNode* pExpr = pRawExpr->pNode; - if (nodesIsExprNode(pExpr)) { + SNode* pRealizedExpr = pRawExpr->pNode; + if (nodesIsExprNode(pRealizedExpr)) { + SExprNode* pExpr = (SExprNode*)pRealizedExpr; if (QUERY_NODE_COLUMN == nodeType(pExpr)) { - strcpy(((SExprNode*)pExpr)->aliasName, ((SColumnNode*)pExpr)->colName); + strcpy(pExpr->aliasName, ((SColumnNode*)pExpr)->colName); + strcpy(pExpr->userAlias, ((SColumnNode*)pExpr)->colName); } else { - int32_t len = TMIN(sizeof(((SExprNode*)pExpr)->aliasName) - 1, pRawExpr->n); - strncpy(((SExprNode*)pExpr)->aliasName, pRawExpr->p, len); - ((SExprNode*)pExpr)->aliasName[len] = '\0'; + int32_t len = TMIN(sizeof(pExpr->aliasName) - 1, pRawExpr->n); + strncpy(pExpr->aliasName, pRawExpr->p, len); + pExpr->aliasName[len] = '\0'; + strncpy(pExpr->userAlias, pRawExpr->p, len); + pExpr->userAlias[len] = '\0'; } } taosMemoryFreeClear(pNode); - return pExpr; + return pRealizedExpr; } SToken getTokenFromRawExprNode(SAstCreateContext* pCxt, SNode* pNode) { @@ -641,11 +645,12 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, SToken* pAlias) { CHECK_PARSER_STATUS(pCxt); trimEscape(pAlias); - int32_t len = TMIN(sizeof(((SExprNode*)pNode)->aliasName) - 1, pAlias->n); - strncpy(((SExprNode*)pNode)->aliasName, pAlias->z, len); - ((SExprNode*)pNode)->aliasName[len] = '\0'; - strncpy(((SExprNode*)pNode)->userAlias, pAlias->z, len); - ((SExprNode*)pNode)->userAlias[len] = '\0'; + SExprNode* pExpr = (SExprNode*)pNode; + int32_t len = TMIN(sizeof(pExpr->aliasName) - 1, pAlias->n); + strncpy(pExpr->aliasName, pAlias->z, len); + pExpr->aliasName[len] = '\0'; + strncpy(pExpr->userAlias, pAlias->z, len); + pExpr->userAlias[len] = '\0'; return pNode; } @@ -766,13 +771,21 @@ SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pPr return (SNode*)select; } +static void setSubquery(SNode* pStmt) { + if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) { + ((SSelectStmt*)pStmt)->isSubquery = true; + } +} + SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode* pLeft, SNode* pRight) { CHECK_PARSER_STATUS(pCxt); SSetOperator* setOp = (SSetOperator*)nodesMakeNode(QUERY_NODE_SET_OPERATOR); CHECK_OUT_OF_MEM(setOp); setOp->opType = type; setOp->pLeft = pLeft; + setSubquery(setOp->pLeft); setOp->pRight = pRight; + setSubquery(setOp->pRight); sprintf(setOp->stmtName, "%p", setOp); return (SNode*)setOp; } @@ -1390,9 +1403,9 @@ SNode* createAlterDnodeStmt(SAstCreateContext* pCxt, const SToken* pDnode, const } SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, bool ignoreExists, SToken* pIndexName, - SToken* pTableName, SNodeList* pCols, SNode* pOptions) { + SNode* pRealTable, SNodeList* pCols, SNode* pOptions) { CHECK_PARSER_STATUS(pCxt); - if (!checkIndexName(pCxt, pIndexName) || !checkTableName(pCxt, 
pTableName) || !checkDbName(pCxt, NULL, true)) { + if (!checkIndexName(pCxt, pIndexName)) { return NULL; } SCreateIndexStmt* pStmt = (SCreateIndexStmt*)nodesMakeNode(QUERY_NODE_CREATE_INDEX_STMT); @@ -1400,7 +1413,9 @@ SNode* createCreateIndexStmt(SAstCreateContext* pCxt, EIndexType type, bool igno pStmt->indexType = type; pStmt->ignoreExists = ignoreExists; COPY_STRING_FORM_ID_TOKEN(pStmt->indexName, pIndexName); - COPY_STRING_FORM_ID_TOKEN(pStmt->tableName, pTableName); + strcpy(pStmt->dbName, ((SRealTableNode*)pRealTable)->table.dbName); + strcpy(pStmt->tableName, ((SRealTableNode*)pRealTable)->table.tableName); + nodesDestroyNode(pRealTable); pStmt->pCols = pCols; pStmt->pOptions = (SIndexOptions*)pOptions; return (SNode*)pStmt; diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index a93091707aaa933c61a32840e70e54112fbc9327..2d4e29a5535bf87f36c027ba6d85d51848c039bf 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -244,7 +244,10 @@ static int32_t collectMetaKeyFromDropTable(SCollectMetaKeyCxt* pCxt, SDropTableS } static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) { - int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + int32_t code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + } if (TSDB_CODE_SUCCESS == code) { code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); } @@ -252,7 +255,11 @@ static int32_t collectMetaKeyFromAlterTable(SCollectMetaKeyCxt* pCxt, SAlterTabl } static int32_t collectMetaKeyFromAlterStable(SCollectMetaKeyCxt* pCxt, SAlterTableStmt* pStmt) { - return reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + int32_t code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache); + if (TSDB_CODE_SUCCESS == code) { + code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, pCxt->pMetaCache); + } + return code; } static int32_t collectMetaKeyFromUseDatabase(SCollectMetaKeyCxt* pCxt, SUseDatabaseStmt* pStmt) { diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index d564d536335db100d282230c7031d807c28f1f41..fbc5e1b4b0ebd010c10bd72d5417cfedd6041527 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -434,7 +434,7 @@ static FORCE_INLINE int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, } static bool isNullStr(SToken* pToken) { - return ((pToken->type == TK_NK_STRING) && (pToken->n != 0) && + return ((pToken->type == TK_NK_STRING) && (strlen(TSDB_DATA_NULL_STR_L) == pToken->n) && (strncasecmp(TSDB_DATA_NULL_STR_L, pToken->z, pToken->n) == 0)); } @@ -1175,11 +1175,6 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks, getSTSRowAppendInfo(pBuilder->rowType, spd, i, ¶m.toffset, ¶m.colIdx); CHECK_CODE(parseValueToken(&pCxt->pSql, &sToken, pSchema, timePrec, tmpTokenBuf, MemRowAppend, ¶m, &pCxt->msg)); - if (PRIMARYKEY_TIMESTAMP_COL_ID == pSchema->colId) { - TSKEY tsKey = TD_ROW_KEY(row); - checkTimestamp(pDataBlocks, (const char*)&tsKey); - } - if (i < spd->numOfBound - 1) { NEXT_VALID_TOKEN(pCxt->pSql, sToken); if (TK_NK_COMMA != 
sToken.type) { @@ -1188,6 +1183,9 @@ static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks, } } + TSKEY tsKey = TD_ROW_KEY(row); + checkTimestamp(pDataBlocks, (const char*)&tsKey); + if (!isParseBindParam) { // set the null value for the columns that do not assign values if ((spd->numOfBound < spd->numOfCols) && TD_IS_TP_ROW(row)) { diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 9e8b28f36243dfdb8d78758dc9bf9be742b43620..e54bc9eb4cd38c0ea2071eee9e9583f9ac744c8d 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -61,16 +61,42 @@ static bool beforeHaving(ESqlClause clause) { return clause < SQL_CLAUSE_HAVING; static bool afterHaving(ESqlClause clause) { return clause > SQL_CLAUSE_HAVING; } +static bool hasSameTableAlias(SArray* pTables) { + if (taosArrayGetSize(pTables) < 2) { + return false; + } + STableNode* pTable0 = taosArrayGetP(pTables, 0); + for (int32_t i = 1; i < taosArrayGetSize(pTables); ++i) { + STableNode* pTable = taosArrayGetP(pTables, i); + if (0 == strcmp(pTable0->tableAlias, pTable->tableAlias)) { + return true; + } + } + return false; +} + static int32_t addNamespace(STranslateContext* pCxt, void* pTable) { size_t currTotalLevel = taosArrayGetSize(pCxt->pNsLevel); if (currTotalLevel > pCxt->currLevel) { SArray* pTables = taosArrayGetP(pCxt->pNsLevel, pCxt->currLevel); taosArrayPush(pTables, &pTable); + if (hasSameTableAlias(pTables)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, + TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, + "Not unique table/alias: '%s'", + ((STableNode*)pTable)->tableAlias); + } } else { do { SArray* pTables = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES); if (pCxt->currLevel == currTotalLevel) { taosArrayPush(pTables, &pTable); + if (hasSameTableAlias(pTables)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, + TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, + "Not unique table/alias: '%s'", + ((STableNode*)pTable)->tableAlias); + } } taosArrayPush(pCxt->pNsLevel, &pTables); ++currTotalLevel; @@ -398,6 +424,7 @@ static void destroyTranslateContext(STranslateContext* pCxt) { taosHashCleanup(pCxt->pDbs); taosHashCleanup(pCxt->pTables); + taosHashCleanup(pCxt->pTargetTables); } static bool isSelectStmt(SNode* pCurrStmt) { @@ -523,6 +550,9 @@ static void setColumnInfoBySchema(const SRealTableNode* pTable, const SSchema* p if ('\0' == pCol->node.aliasName[0]) { strcpy(pCol->node.aliasName, pColSchema->name); } + if ('\0' == pCol->node.userAlias[0]) { + strcpy(pCol->node.userAlias, pColSchema->name); + } pCol->tableId = pTable->pMeta->uid; pCol->tableType = pTable->pMeta->tableType; pCol->colId = pColSchema->colId; @@ -549,6 +579,9 @@ static void setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColum if ('\0' == pCol->node.aliasName[0]) { strcpy(pCol->node.aliasName, pCol->colName); } + if ('\0' == pCol->node.userAlias[0]) { + strcpy(pCol->node.userAlias, pCol->colName); + } pCol->node.resType = pExpr->resType; } @@ -691,7 +724,7 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p SNode* pNode; FOREACH(pNode, pProjectionList) { SExprNode* pExpr = (SExprNode*)pNode; - if (0 == strcmp((*pCol)->colName, pExpr->aliasName)) { + if (0 == strcmp((*pCol)->colName, pExpr->userAlias)) { SColumnRefNode* pColRef = (SColumnRefNode*)nodesMakeNode(QUERY_NODE_COLUMN_REF); if (NULL == pColRef) { pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY; @@ -879,8 +912,7 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, 
SValueNode* pVal, } case TSDB_DATA_TYPE_VARCHAR: case TSDB_DATA_TYPE_VARBINARY: { - if (strict && (!IS_VAR_DATA_TYPE(pVal->node.resType.type) || - pVal->node.resType.bytes > targetDt.bytes - VARSTR_HEADER_SIZE)) { + if (strict && (pVal->node.resType.bytes > targetDt.bytes - VARSTR_HEADER_SIZE)) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); } pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); @@ -900,9 +932,6 @@ static EDealRes translateNormalValue(STranslateContext* pCxt, SValueNode* pVal, break; } case TSDB_DATA_TYPE_NCHAR: { - if (strict && !IS_VAR_DATA_TYPE(pVal->node.resType.type)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_WRONG_VALUE_TYPE, pVal->literal); - } pVal->datum.p = taosMemoryCalloc(1, targetDt.bytes + 1); if (NULL == pVal->datum.p) { return generateDealNodeErrMsg(pCxt, TSDB_CODE_OUT_OF_MEMORY); @@ -1535,6 +1564,7 @@ static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode } strcpy(pFunc->functionName, "_select_value"); strcpy(pFunc->node.aliasName, ((SExprNode*)*pNode)->aliasName); + strcpy(pFunc->node.userAlias, ((SExprNode*)*pNode)->userAlias); pCxt->errCode = nodesListMakeAppend(&pFunc->pParameterList, *pNode); if (TSDB_CODE_SUCCESS == pCxt->errCode) { pCxt->errCode = getFuncInfo(pCxt, pFunc); @@ -2171,12 +2201,51 @@ static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect return TSDB_CODE_SUCCESS; } +static int32_t rewriteProjectAlias(SNodeList* pProjectionList) { + int32_t no = 1; + SNode* pProject = NULL; + FOREACH(pProject, pProjectionList) { + SExprNode* pExpr = (SExprNode*)pProject; + if ('\0' == pExpr->userAlias[0]) { + strcpy(pExpr->userAlias, pExpr->aliasName); + } + sprintf(pExpr->aliasName, "#expr_%d", no++); + } + return TSDB_CODE_SUCCESS; +} + +static int32_t checkProjectAlias(STranslateContext* pCxt, SNodeList* pProjectionList) { + SHashObj* pUserAliasSet = taosHashInit(LIST_LENGTH(pProjectionList), + taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); + SNode* pProject = NULL; + FOREACH(pProject, pProjectionList) { + SExprNode* pExpr = (SExprNode*)pProject; + if (NULL != taosHashGet(pUserAliasSet, pExpr->userAlias, strlen(pExpr->userAlias))) { + taosHashCleanup(pUserAliasSet); + return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_AMBIGUOUS_COLUMN, pExpr->userAlias); + } + taosHashPut(pUserAliasSet, pExpr->userAlias, strlen(pExpr->userAlias), &pExpr, POINTER_BYTES); + } + taosHashCleanup(pUserAliasSet); + return TSDB_CODE_SUCCESS; +} + +static int32_t translateProjectionList(STranslateContext* pCxt, SSelectStmt* pSelect) { + if (pSelect->isSubquery) { + return checkProjectAlias(pCxt, pSelect->pProjectionList); + } + return rewriteProjectAlias(pSelect->pProjectionList); +} + static int32_t translateSelectList(STranslateContext* pCxt, SSelectStmt* pSelect) { pCxt->currClause = SQL_CLAUSE_SELECT; int32_t code = translateExprList(pCxt, pSelect->pProjectionList); if (TSDB_CODE_SUCCESS == code) { code = translateStar(pCxt, pSelect); } + if (TSDB_CODE_SUCCESS == code) { + code = translateProjectionList(pCxt, pSelect); + } if (TSDB_CODE_SUCCESS == code) { code = checkExprListForGroupBy(pCxt, pSelect, pSelect->pProjectionList); } @@ -2232,7 +2301,7 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi } SNode* pPrimaryKeyCond = NULL; - nodesPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL, NULL); + filterPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL, NULL); int32_t code = TSDB_CODE_SUCCESS; if (NULL 
!= pPrimaryKeyCond) { @@ -2699,6 +2768,7 @@ static SNode* createSetOperProject(const char* pTableAlias, SNode* pNode) { strcpy(pCol->tableAlias, pTableAlias); strcpy(pCol->colName, ((SExprNode*)pNode)->aliasName); strcpy(pCol->node.aliasName, pCol->colName); + strcpy(pCol->node.userAlias, ((SExprNode*)pNode)->userAlias); return (SNode*)pCol; } @@ -2810,7 +2880,7 @@ static int32_t partitionDeleteWhere(STranslateContext* pCxt, SDeleteStmt* pDelet SNode* pPrimaryKeyCond = NULL; SNode* pOtherCond = NULL; - int32_t code = nodesPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, NULL, &pDelete->pTagCond, &pOtherCond); + int32_t code = filterPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, NULL, &pDelete->pTagCond, &pOtherCond); if (TSDB_CODE_SUCCESS == code && NULL != pOtherCond) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DELETE_WHERE); } @@ -3300,6 +3370,10 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName } static int32_t checkCreateDatabase(STranslateContext* pCxt, SCreateDatabaseStmt* pStmt) { + if (NULL != strchr(pStmt->dbName, '.')) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, + "The database name cannot contain '.'"); + } return checkDatabaseOptions(pCxt, pStmt->dbName, pStmt->pOptions); } @@ -4079,7 +4153,7 @@ static SSchema* getTagSchema(STableMeta* pTableMeta, const char* pTagName) { return NULL; } -static int32_t checkAlterSuperTableImpl(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta) { +static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTableStmt* pStmt, STableMeta* pTableMeta) { SSchema* pTagsSchema = getTableTagSchema(pTableMeta); if (getNumOfTags(pTableMeta) == 1 && pTagsSchema->type == TSDB_DATA_TYPE_JSON && (pStmt->alterType == TSDB_ALTER_TABLE_ADD_TAG || pStmt->alterType == TSDB_ALTER_TABLE_DROP_TAG || @@ -4105,11 +4179,16 @@ static int32_t checkAlterSuperTableImpl(STranslateContext* pCxt, SAlterTableStmt } static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pStmt) { - if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType || TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) { + if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == pStmt->alterType) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Set tag value only available for child table"); } + if (TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME == pStmt->alterType) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, + "Rename column only available for normal table"); + } + if (pStmt->alterType == TSDB_ALTER_TABLE_UPDATE_OPTIONS && -1 != pStmt->pOptions->ttl) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE); } @@ -4122,10 +4201,21 @@ static int32_t checkAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt* pS return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_COL_JSON); } + SDbCfgInfo dbCfg = {0}; + int32_t code = getDBCfg(pCxt, pStmt->dbName, &dbCfg); + if (TSDB_CODE_SUCCESS == code && NULL != dbCfg.pRetensions && + (TSDB_ALTER_TABLE_ADD_COLUMN == pStmt->alterType || TSDB_ALTER_TABLE_DROP_COLUMN == pStmt->alterType || + TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE, + "Modifying the table schema is not supported in databases " + "configured with the 'RETENTIONS' option"); + } STableMeta* pTableMeta = NULL; - int32_t code = getTableMeta(pCxt, pStmt->dbName, 
pStmt->tableName, &pTableMeta); if (TSDB_CODE_SUCCESS == code) { - code = checkAlterSuperTableImpl(pCxt, pStmt, pTableMeta); + code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta); + } + if (TSDB_CODE_SUCCESS == code) { + code = checkAlterSuperTableBySchema(pCxt, pStmt, pTableMeta); } taosMemoryFree(pTableMeta); return code; @@ -4263,9 +4353,8 @@ static int32_t getSmaIndexAst(STranslateContext* pCxt, SCreateIndexStmt* pStmt, static int32_t buildCreateSmaReq(STranslateContext* pCxt, SCreateIndexStmt* pStmt, SMCreateSmaReq* pReq) { SName name; tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pCxt->pParseCxt->db, pStmt->indexName, &name), pReq->name); - strcpy(name.tname, pStmt->tableName); - name.tname[strlen(pStmt->tableName)] = '\0'; - tNameExtractFullName(&name, pReq->stb); + memset(&name, 0, sizeof(SName)); + tNameExtractFullName(toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &name), pReq->stb); pReq->igExists = pStmt->ignoreExists; pReq->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pReq->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; @@ -4343,7 +4432,7 @@ static int32_t translateCreateSmaIndex(STranslateContext* pCxt, SCreateIndexStmt static int32_t buildCreateFullTextReq(STranslateContext* pCxt, SCreateIndexStmt* pStmt, SMCreateFullTextReq* pReq) { // impl later - return 0; + return TSDB_CODE_SUCCESS; } static int32_t translateCreateFullTextIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { @@ -4357,12 +4446,10 @@ static int32_t translateCreateFullTextIndex(STranslateContext* pCxt, SCreateInde } static int32_t translateCreateIndex(STranslateContext* pCxt, SCreateIndexStmt* pStmt) { - if (INDEX_TYPE_SMA == pStmt->indexType) { - return translateCreateSmaIndex(pCxt, pStmt); - } else if (INDEX_TYPE_FULLTEXT == pStmt->indexType) { + if (INDEX_TYPE_FULLTEXT == pStmt->indexType) { return translateCreateFullTextIndex(pCxt, pStmt); } - return TSDB_CODE_FAILED; + return translateCreateSmaIndex(pCxt, pStmt); } static int32_t translateDropIndex(STranslateContext* pCxt, SDropIndexStmt* pStmt) { @@ -4983,7 +5070,7 @@ static int32_t translateSubquery(STranslateContext* pCxt, SNode* pNode) { SNode* pCurrStmt = pCxt->pCurrStmt; int32_t currLevel = pCxt->currLevel; pCxt->currLevel = ++(pCxt->levelNo); - int32_t code = translateQuery(pCxt, pNode); + int32_t code = translateQuery(pCxt, pNode); pCxt->currClause = currClause; pCxt->pCurrStmt = pCurrStmt; pCxt->currLevel = currLevel; diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index af5a51a903c61bf481404a16fe57b1d5a929fd93..549e8d68ff9bd7d20779a11c6cdfc4474b835bc1 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -219,82 +219,82 @@ typedef union { #define YY_ACTTAB_COUNT (2395) static const YYACTIONTYPE yy_action[] = { /* 0 */ 433, 1937, 434, 1503, 1654, 1655, 1599, 326, 441, 548, - /* 10 */ 434, 1503, 39, 37, 1936, 143, 1709, 1776, 1934, 1937, + /* 10 */ 434, 1503, 39, 37, 1936, 144, 1709, 1776, 1934, 1937, /* 20 */ 339, 1469, 1264, 513, 1610, 40, 38, 36, 35, 34, - /* 30 */ 1793, 325, 163, 1341, 1706, 1262, 1934, 124, 1290, 1023, - /* 40 */ 1006, 1022, 104, 1772, 1778, 103, 102, 101, 100, 99, - /* 50 */ 98, 97, 96, 95, 156, 570, 1336, 73, 1811, 344, - /* 60 */ 147, 14, 1653, 1655, 1568, 88, 577, 1647, 1270, 1024, - /* 70 */ 1023, 1762, 1022, 576, 39, 37, 1404, 122, 121, 1604, - /* 80 */ 1010, 1011, 339, 1530, 1264, 469, 1600, 553, 145, 1, - /* 90 */ 1480, 550, 158, 1879, 1880, 1341, 1884, 1262, 1825, 551, - /* 100 */ 
1024, 64, 91, 1794, 579, 1796, 1797, 575, 1660, 570, + /* 30 */ 1793, 325, 164, 1341, 1706, 1262, 1934, 125, 1290, 1023, + /* 40 */ 1006, 1022, 105, 1772, 1778, 104, 103, 102, 101, 100, + /* 50 */ 99, 98, 97, 96, 157, 570, 1336, 73, 1811, 344, + /* 60 */ 148, 14, 1653, 1655, 1568, 89, 577, 1647, 1270, 1024, + /* 70 */ 1023, 1762, 1022, 576, 39, 37, 1404, 123, 122, 1604, + /* 80 */ 1010, 1011, 339, 1530, 1264, 469, 1600, 553, 146, 1, + /* 90 */ 1480, 550, 159, 1879, 1880, 1341, 1884, 1262, 1825, 551, + /* 100 */ 1024, 64, 92, 1794, 579, 1796, 1797, 575, 1660, 570, /* 110 */ 1291, 662, 1871, 1408, 1937, 312, 306, 1867, 1336, 1289, - /* 120 */ 36, 35, 34, 14, 1658, 1343, 1344, 1935, 1937, 104, - /* 130 */ 1270, 1934, 103, 102, 101, 100, 99, 98, 97, 96, - /* 140 */ 95, 164, 450, 1161, 1162, 1934, 639, 638, 637, 636, - /* 150 */ 349, 2, 635, 634, 125, 629, 628, 627, 626, 625, - /* 160 */ 624, 623, 136, 619, 618, 617, 348, 347, 614, 613, + /* 120 */ 36, 35, 34, 14, 1658, 1343, 1344, 1935, 1937, 105, + /* 130 */ 1270, 1934, 104, 103, 102, 101, 100, 99, 98, 97, + /* 140 */ 96, 165, 450, 1161, 1162, 1934, 639, 638, 637, 636, + /* 150 */ 349, 2, 635, 634, 126, 629, 628, 627, 626, 625, + /* 160 */ 624, 623, 137, 619, 618, 617, 348, 347, 614, 613, /* 170 */ 1265, 316, 1263, 662, 1491, 33, 32, 1466, 1937, 40, /* 180 */ 38, 36, 35, 34, 30, 261, 63, 1343, 1344, 432, - /* 190 */ 1290, 163, 436, 1268, 1269, 1934, 1318, 1319, 1321, 1322, + /* 190 */ 1290, 164, 436, 1268, 1269, 1934, 1318, 1319, 1321, 1322, /* 200 */ 1323, 1324, 1325, 1326, 572, 568, 1334, 1335, 1337, 1338, /* 210 */ 1339, 1340, 1342, 1345, 1468, 1762, 33, 32, 612, 551, - /* 220 */ 40, 38, 36, 35, 34, 1289, 317, 165, 315, 314, - /* 230 */ 610, 473, 1265, 209, 1263, 475, 1425, 354, 113, 112, - /* 240 */ 111, 110, 109, 108, 107, 106, 105, 253, 384, 134, - /* 250 */ 133, 607, 606, 605, 63, 1268, 1269, 474, 1318, 1319, + /* 220 */ 40, 38, 36, 35, 34, 1289, 317, 166, 315, 314, + /* 230 */ 610, 473, 1265, 210, 1263, 475, 1425, 354, 114, 113, + /* 240 */ 112, 111, 110, 109, 108, 107, 106, 253, 384, 135, + /* 250 */ 134, 607, 606, 605, 63, 1268, 1269, 474, 1318, 1319, /* 260 */ 1321, 1322, 1323, 1324, 1325, 1326, 572, 568, 1334, 1335, /* 270 */ 1337, 1338, 1339, 1340, 1342, 1345, 39, 37, 1937, 534, /* 280 */ 1423, 1424, 1426, 1427, 339, 303, 1264, 378, 43, 75, - /* 290 */ 305, 162, 221, 515, 1793, 1934, 1320, 1341, 1435, 1262, + /* 290 */ 305, 163, 222, 515, 1793, 1934, 1320, 1341, 1435, 1262, /* 300 */ 1119, 601, 600, 599, 1123, 598, 1125, 1126, 597, 1128, /* 310 */ 594, 487, 1134, 591, 1136, 1137, 588, 585, 440, 1811, - /* 320 */ 1336, 436, 1811, 537, 165, 14, 497, 541, 1937, 1461, + /* 320 */ 1336, 436, 1811, 537, 166, 14, 497, 541, 1937, 1461, /* 330 */ 577, 1699, 1270, 551, 377, 1762, 376, 576, 39, 37, - /* 340 */ 208, 162, 170, 63, 548, 1934, 339, 49, 1264, 525, - /* 350 */ 665, 1244, 1245, 2, 490, 1490, 222, 223, 484, 1341, - /* 360 */ 55, 1262, 1825, 207, 268, 540, 93, 1794, 579, 1796, - /* 370 */ 1797, 575, 124, 570, 1289, 662, 1871, 1608, 154, 143, + /* 340 */ 209, 163, 171, 63, 548, 1934, 339, 49, 1264, 525, + /* 350 */ 665, 1244, 1245, 2, 490, 1490, 223, 224, 484, 1341, + /* 360 */ 55, 1262, 1825, 208, 268, 540, 94, 1794, 579, 1796, + /* 370 */ 1797, 575, 125, 570, 1289, 662, 1871, 1608, 155, 144, /* 380 */ 1870, 1867, 1336, 655, 651, 647, 643, 266, 1611, 1343, - /* 390 */ 1344, 1270, 165, 1264, 1270, 58, 1762, 216, 57, 1489, + /* 390 */ 1344, 1270, 166, 1264, 1270, 58, 1762, 217, 57, 1489, /* 400 */ 543, 538, 42, 39, 37, 1346, 1262, 
1780, 496, 1488, - /* 410 */ 548, 339, 122, 1264, 305, 8, 1460, 515, 1776, 89, - /* 420 */ 73, 494, 231, 492, 1341, 542, 1262, 159, 1879, 1880, - /* 430 */ 63, 1884, 77, 119, 1265, 1351, 1263, 662, 124, 1270, + /* 410 */ 548, 339, 123, 1264, 305, 8, 1460, 515, 1776, 90, + /* 420 */ 73, 494, 231, 492, 1341, 542, 1262, 160, 1879, 1880, + /* 430 */ 63, 1884, 77, 120, 1265, 1351, 1263, 662, 125, 1270, /* 440 */ 1762, 1289, 1603, 63, 1772, 1778, 328, 1336, 479, 478, /* 450 */ 1762, 1343, 1344, 11, 10, 522, 570, 1268, 1269, 1270, /* 460 */ 1318, 1319, 1321, 1322, 1323, 1324, 1325, 1326, 572, 568, - /* 470 */ 1334, 1335, 1337, 1338, 1339, 1340, 1342, 1345, 122, 215, - /* 480 */ 9, 165, 662, 482, 481, 165, 171, 218, 1586, 195, - /* 490 */ 120, 633, 631, 160, 1879, 1880, 1265, 1884, 1263, 1660, - /* 500 */ 477, 480, 662, 149, 61, 1236, 327, 211, 467, 463, - /* 510 */ 459, 455, 194, 1585, 76, 1658, 1343, 1344, 71, 1268, + /* 470 */ 1334, 1335, 1337, 1338, 1339, 1340, 1342, 1345, 123, 216, + /* 480 */ 9, 166, 662, 482, 481, 166, 172, 219, 1586, 196, + /* 490 */ 121, 633, 631, 161, 1879, 1880, 1265, 1884, 1263, 1660, + /* 500 */ 477, 480, 662, 150, 61, 1236, 327, 212, 467, 463, + /* 510 */ 459, 455, 195, 1585, 76, 1658, 1343, 1344, 71, 1268, /* 520 */ 1269, 70, 1318, 1319, 1321, 1322, 1323, 1324, 1325, 1326, /* 530 */ 572, 568, 1334, 1335, 1337, 1338, 1339, 1340, 1342, 1345, - /* 540 */ 165, 1265, 1377, 1263, 74, 1583, 1288, 192, 450, 81, - /* 550 */ 33, 32, 342, 165, 40, 38, 36, 35, 34, 1533, - /* 560 */ 143, 1265, 1584, 1263, 1268, 1269, 621, 512, 165, 1610, + /* 540 */ 166, 1265, 1377, 1263, 74, 1583, 1288, 193, 450, 82, + /* 550 */ 33, 32, 342, 166, 40, 38, 36, 35, 34, 1533, + /* 560 */ 144, 1265, 1584, 1263, 1268, 1269, 621, 512, 166, 1610, /* 570 */ 1601, 1699, 33, 32, 1010, 1011, 40, 38, 36, 35, - /* 580 */ 34, 165, 173, 604, 1268, 1269, 1081, 1318, 1319, 1321, + /* 580 */ 34, 166, 174, 604, 1268, 1269, 1081, 1318, 1319, 1321, /* 590 */ 1322, 1323, 1324, 1325, 1326, 572, 568, 1334, 1335, 1337, - /* 600 */ 1338, 1339, 1340, 1342, 1345, 39, 37, 22, 1937, 191, - /* 610 */ 184, 1793, 189, 339, 610, 1264, 446, 1597, 1320, 1083, - /* 620 */ 513, 162, 612, 482, 481, 1934, 1341, 525, 1262, 1593, - /* 630 */ 120, 1707, 182, 134, 133, 607, 606, 605, 114, 1811, + /* 600 */ 1338, 1339, 1340, 1342, 1345, 39, 37, 22, 1937, 192, + /* 610 */ 185, 1793, 190, 339, 610, 1264, 446, 1597, 1320, 1083, + /* 620 */ 513, 163, 612, 482, 481, 1934, 1341, 525, 1262, 1593, + /* 630 */ 121, 1707, 183, 135, 134, 607, 606, 605, 115, 1811, /* 640 */ 477, 480, 7, 345, 548, 471, 610, 577, 1487, 1336, - /* 650 */ 438, 143, 1762, 525, 576, 1608, 1287, 1595, 419, 352, - /* 660 */ 1610, 1270, 351, 1365, 167, 134, 133, 607, 606, 605, - /* 670 */ 33, 32, 124, 1486, 40, 38, 36, 35, 34, 1825, + /* 650 */ 438, 144, 1762, 525, 576, 1608, 1287, 1595, 419, 352, + /* 660 */ 1610, 1270, 351, 1365, 168, 135, 134, 607, 606, 605, + /* 670 */ 33, 32, 125, 1486, 40, 38, 36, 35, 34, 1825, /* 680 */ 1485, 1608, 9, 280, 1794, 579, 1796, 1797, 575, 1762, /* 690 */ 570, 33, 32, 553, 505, 40, 38, 36, 35, 34, /* 700 */ 1937, 33, 32, 1937, 662, 40, 38, 36, 35, 34, - /* 710 */ 175, 174, 122, 162, 1762, 498, 162, 1934, 1343, 1344, + /* 710 */ 176, 175, 123, 163, 1762, 498, 163, 1934, 1343, 1344, /* 720 */ 1934, 1762, 1705, 307, 300, 27, 1484, 251, 1879, 547, /* 730 */ 525, 546, 39, 37, 1937, 1370, 1483, 302, 1886, 1287, - /* 740 */ 339, 114, 1264, 525, 307, 555, 412, 164, 476, 424, - /* 750 */ 1591, 1934, 1303, 1341, 382, 1262, 1937, 212, 1608, 373, - /* 
760 */ 1363, 1886, 1883, 1265, 1509, 1263, 397, 1762, 425, 162, + /* 740 */ 339, 115, 1264, 525, 307, 555, 412, 165, 476, 424, + /* 750 */ 1591, 1934, 1303, 1341, 382, 1262, 1937, 213, 1608, 373, + /* 760 */ 1363, 1886, 1883, 1265, 1509, 1263, 397, 1762, 425, 163, /* 770 */ 399, 1608, 1482, 1934, 1479, 1292, 1336, 1762, 375, 371, /* 780 */ 1704, 1363, 300, 1415, 1478, 1882, 1268, 1269, 1270, 1318, /* 790 */ 1319, 1321, 1322, 1323, 1324, 1325, 1326, 572, 568, 1334, @@ -311,111 +311,111 @@ static const YYACTIONTYPE yy_action[] = { /* 900 */ 1359, 1360, 1361, 1362, 1366, 1367, 1368, 1369, 1471, 1762, /* 910 */ 1793, 1881, 233, 1268, 1269, 1762, 1318, 1319, 1321, 1322, /* 920 */ 1323, 1324, 1325, 1326, 572, 568, 1334, 1335, 1337, 1338, - /* 930 */ 1339, 1340, 1342, 1345, 525, 144, 525, 525, 1811, 1273, + /* 930 */ 1339, 1340, 1342, 1345, 525, 145, 525, 525, 1811, 1273, /* 940 */ 279, 525, 1891, 1397, 548, 383, 577, 389, 404, 1762, - /* 950 */ 603, 1762, 405, 576, 277, 60, 273, 132, 59, 1638, - /* 960 */ 44, 4, 1608, 1749, 1608, 1608, 1481, 553, 200, 1608, - /* 970 */ 525, 198, 124, 1520, 178, 429, 427, 1780, 1825, 1515, - /* 980 */ 560, 449, 91, 1794, 579, 1796, 1797, 575, 1776, 570, - /* 990 */ 566, 202, 1871, 553, 201, 483, 306, 1867, 1608, 336, - /* 1000 */ 335, 485, 525, 41, 54, 557, 63, 220, 1937, 1278, - /* 1010 */ 361, 1513, 122, 1605, 1772, 1778, 334, 53, 509, 1320, - /* 1020 */ 1341, 162, 1271, 11, 10, 1934, 570, 251, 1879, 547, - /* 1030 */ 1608, 546, 256, 488, 1937, 525, 204, 1781, 206, 203, - /* 1040 */ 128, 205, 1793, 1336, 90, 131, 1737, 162, 1776, 51, - /* 1050 */ 1213, 1934, 33, 32, 224, 1270, 40, 38, 36, 35, - /* 1060 */ 34, 33, 32, 1608, 132, 40, 38, 36, 35, 34, + /* 950 */ 603, 1762, 405, 576, 277, 60, 273, 133, 59, 1638, + /* 960 */ 44, 4, 1608, 1749, 1608, 1608, 1481, 553, 201, 1608, + /* 970 */ 525, 199, 125, 1520, 179, 429, 427, 1780, 1825, 1515, + /* 980 */ 560, 449, 92, 1794, 579, 1796, 1797, 575, 1776, 570, + /* 990 */ 566, 203, 1871, 553, 202, 483, 306, 1867, 1608, 336, + /* 1000 */ 335, 485, 525, 41, 54, 557, 63, 221, 1937, 1278, + /* 1010 */ 361, 1513, 123, 1605, 1772, 1778, 334, 53, 509, 1320, + /* 1020 */ 1341, 163, 1271, 11, 10, 1934, 570, 251, 1879, 547, + /* 1030 */ 1608, 546, 256, 488, 1937, 525, 205, 1781, 207, 204, + /* 1040 */ 129, 206, 1793, 1336, 91, 132, 1737, 163, 1776, 51, + /* 1050 */ 1213, 1934, 33, 32, 225, 1270, 40, 38, 36, 35, + /* 1060 */ 34, 33, 32, 1608, 133, 40, 38, 36, 35, 34, /* 1070 */ 1811, 525, 1463, 1464, 1772, 1778, 1276, 1569, 552, 68, - /* 1080 */ 67, 381, 506, 1762, 169, 576, 570, 518, 51, 1783, - /* 1090 */ 615, 535, 230, 1272, 87, 468, 1371, 1400, 565, 1608, - /* 1100 */ 301, 499, 237, 369, 84, 367, 363, 359, 356, 353, - /* 1110 */ 1825, 1112, 1069, 41, 92, 1794, 579, 1796, 1797, 575, + /* 1080 */ 67, 381, 506, 1762, 170, 576, 570, 518, 468, 1783, + /* 1090 */ 615, 535, 230, 1272, 88, 245, 1371, 1400, 565, 1608, + /* 1100 */ 301, 499, 51, 369, 85, 367, 363, 359, 356, 353, + /* 1110 */ 1825, 1112, 1069, 237, 93, 1794, 579, 1796, 1797, 575, /* 1120 */ 525, 570, 1355, 525, 1871, 525, 525, 525, 330, 1867, - /* 1130 */ 157, 510, 1785, 1050, 523, 1422, 524, 262, 346, 616, - /* 1140 */ 41, 1793, 161, 1303, 165, 245, 350, 1504, 1608, 240, - /* 1150 */ 1897, 1608, 583, 1608, 1608, 1608, 1812, 1279, 131, 1274, - /* 1160 */ 1327, 1067, 1648, 132, 1901, 116, 1051, 549, 250, 1811, - /* 1170 */ 255, 258, 3, 260, 131, 5, 355, 552, 313, 360, - /* 1180 */ 1282, 1284, 1762, 1793, 576, 561, 1229, 272, 172, 269, - /* 1190 */ 385, 1287, 568, 1334, 
1335, 1337, 1338, 1339, 1340, 1140, - /* 1200 */ 406, 142, 1701, 413, 421, 1144, 420, 422, 558, 1825, - /* 1210 */ 1151, 1811, 1149, 92, 1794, 579, 1796, 1797, 575, 577, - /* 1220 */ 570, 135, 426, 1871, 1762, 428, 576, 330, 1867, 157, - /* 1230 */ 1275, 1293, 431, 430, 439, 1296, 1793, 1295, 442, 181, - /* 1240 */ 183, 443, 444, 1297, 447, 445, 186, 188, 1294, 1898, - /* 1250 */ 448, 1825, 190, 1793, 72, 92, 1794, 579, 1796, 1797, - /* 1260 */ 575, 193, 570, 451, 1811, 1871, 470, 1742, 500, 330, - /* 1270 */ 1867, 1950, 577, 472, 1598, 304, 197, 1762, 1594, 576, - /* 1280 */ 1905, 1811, 199, 115, 137, 507, 138, 1596, 1592, 577, - /* 1290 */ 210, 139, 270, 501, 1762, 140, 576, 213, 511, 322, - /* 1300 */ 533, 217, 514, 129, 1825, 519, 226, 271, 92, 1794, - /* 1310 */ 579, 1796, 1797, 575, 1292, 570, 1793, 504, 1871, 520, - /* 1320 */ 1741, 1825, 330, 1867, 1950, 92, 1794, 579, 1796, 1797, - /* 1330 */ 575, 228, 570, 1928, 1711, 1871, 516, 130, 324, 330, - /* 1340 */ 1867, 1950, 80, 521, 1811, 1609, 536, 1902, 529, 531, - /* 1350 */ 1890, 235, 577, 239, 6, 532, 1912, 1762, 329, 576, - /* 1360 */ 545, 539, 530, 528, 527, 1397, 1793, 123, 249, 1291, - /* 1370 */ 1887, 562, 1911, 553, 559, 331, 1793, 48, 82, 1893, - /* 1380 */ 581, 1652, 1581, 658, 1825, 274, 659, 246, 286, 1794, - /* 1390 */ 579, 1796, 1797, 575, 1811, 570, 52, 265, 244, 248, - /* 1400 */ 151, 254, 577, 1933, 1811, 1953, 661, 1762, 247, 576, - /* 1410 */ 1852, 150, 577, 276, 1937, 287, 1756, 1762, 1793, 576, - /* 1420 */ 297, 278, 296, 553, 556, 563, 1755, 164, 257, 65, - /* 1430 */ 1754, 1934, 1753, 66, 1825, 259, 1750, 357, 286, 1794, - /* 1440 */ 579, 1796, 1797, 575, 1825, 570, 1811, 358, 93, 1794, - /* 1450 */ 579, 1796, 1797, 575, 574, 570, 1256, 1257, 1871, 1762, - /* 1460 */ 168, 576, 564, 1867, 1937, 362, 1748, 366, 364, 365, - /* 1470 */ 1747, 368, 1793, 370, 1745, 372, 1744, 162, 1746, 374, - /* 1480 */ 1232, 1934, 1231, 1722, 1793, 1721, 1825, 379, 1720, 380, + /* 1130 */ 158, 510, 1785, 1050, 227, 525, 523, 524, 262, 616, + /* 1140 */ 41, 1793, 162, 1303, 166, 350, 346, 1504, 1608, 1422, + /* 1150 */ 1897, 1608, 41, 1608, 1608, 1608, 1812, 1279, 583, 1274, + /* 1160 */ 240, 1067, 1648, 1608, 1901, 132, 1051, 549, 255, 1811, + /* 1170 */ 250, 258, 3, 260, 133, 117, 355, 552, 132, 5, + /* 1180 */ 1282, 1284, 1762, 1793, 576, 561, 360, 1327, 313, 269, + /* 1190 */ 173, 1229, 568, 1334, 1335, 1337, 1338, 1339, 1340, 272, + /* 1200 */ 385, 143, 1287, 406, 1701, 1140, 421, 413, 558, 1825, + /* 1210 */ 426, 1811, 1144, 93, 1794, 579, 1796, 1797, 575, 577, + /* 1220 */ 570, 1151, 1149, 1871, 1762, 136, 576, 330, 1867, 158, + /* 1230 */ 1275, 420, 422, 428, 430, 1293, 1793, 431, 1296, 439, + /* 1240 */ 182, 443, 442, 184, 444, 1295, 1297, 447, 445, 1898, + /* 1250 */ 187, 1825, 189, 1793, 1294, 93, 1794, 579, 1796, 1797, + /* 1260 */ 575, 448, 570, 191, 1811, 1871, 72, 451, 194, 330, + /* 1270 */ 1867, 1950, 577, 472, 470, 304, 1598, 1762, 198, 576, + /* 1280 */ 1905, 1811, 116, 1594, 1742, 211, 200, 500, 138, 577, + /* 1290 */ 270, 139, 1596, 507, 1762, 1592, 576, 140, 141, 214, + /* 1300 */ 511, 218, 322, 501, 1825, 533, 519, 1292, 93, 1794, + /* 1310 */ 579, 1796, 1797, 575, 1609, 570, 1793, 228, 1871, 504, + /* 1320 */ 130, 1825, 330, 1867, 1950, 93, 1794, 579, 1796, 1797, + /* 1330 */ 575, 514, 570, 1928, 1741, 1871, 1711, 516, 131, 330, + /* 1340 */ 1867, 1950, 324, 521, 1811, 271, 81, 520, 1902, 529, + /* 1350 */ 1890, 235, 577, 536, 239, 6, 1912, 1762, 531, 576, + /* 1360 */ 545, 532, 530, 329, 539, 1911, 1793, 
528, 527, 1397, + /* 1370 */ 124, 249, 1291, 553, 562, 559, 1793, 48, 331, 1887, + /* 1380 */ 83, 581, 1652, 246, 1825, 248, 1893, 265, 286, 1794, + /* 1390 */ 579, 1796, 1797, 575, 1811, 570, 658, 274, 1581, 659, + /* 1400 */ 247, 1933, 577, 661, 1811, 52, 244, 1762, 151, 576, + /* 1410 */ 152, 1852, 577, 276, 1937, 287, 297, 1762, 1793, 576, + /* 1420 */ 278, 1756, 296, 553, 254, 1755, 556, 165, 257, 1953, + /* 1430 */ 65, 1934, 1754, 1753, 1825, 66, 1750, 259, 286, 1794, + /* 1440 */ 579, 1796, 1797, 575, 1825, 570, 1811, 563, 94, 1794, + /* 1450 */ 579, 1796, 1797, 575, 574, 570, 358, 357, 1871, 1762, + /* 1460 */ 362, 576, 564, 1867, 1937, 1256, 1257, 1748, 1747, 169, + /* 1470 */ 364, 366, 1793, 365, 1746, 370, 368, 163, 1745, 372, + /* 1480 */ 1744, 1934, 374, 1232, 1793, 1231, 1825, 1722, 1721, 380, /* 1490 */ 294, 1794, 579, 1796, 1797, 575, 573, 570, 567, 1843, - /* 1500 */ 1811, 1719, 1201, 1694, 126, 1693, 1692, 1691, 577, 69, - /* 1510 */ 1690, 1689, 1811, 1762, 1688, 576, 1687, 1686, 395, 396, - /* 1520 */ 577, 1685, 398, 1684, 1683, 1762, 1682, 576, 1681, 1680, - /* 1530 */ 1679, 1678, 1677, 1676, 1675, 1674, 1673, 1672, 1671, 1670, - /* 1540 */ 1825, 1793, 127, 1669, 146, 1794, 579, 1796, 1797, 575, - /* 1550 */ 1668, 570, 1825, 180, 176, 1534, 93, 1794, 579, 1796, - /* 1560 */ 1797, 575, 1667, 570, 1666, 1793, 1871, 1203, 1664, 1811, - /* 1570 */ 1665, 1868, 1663, 1662, 323, 1661, 1535, 577, 177, 1532, - /* 1580 */ 1500, 117, 1762, 1793, 576, 1013, 179, 435, 554, 1951, - /* 1590 */ 155, 1012, 437, 1811, 1499, 118, 1735, 1729, 526, 1718, - /* 1600 */ 185, 577, 187, 1717, 1703, 1587, 1762, 1531, 576, 1825, - /* 1610 */ 1043, 1811, 1529, 295, 1794, 579, 1796, 1797, 575, 577, - /* 1620 */ 570, 452, 454, 1527, 1762, 453, 576, 456, 457, 458, - /* 1630 */ 1525, 461, 462, 1825, 1523, 460, 464, 295, 1794, 579, - /* 1640 */ 1796, 1797, 575, 1512, 570, 1793, 1511, 466, 1496, 465, - /* 1650 */ 1589, 1825, 50, 196, 1154, 290, 1794, 579, 1796, 1797, - /* 1660 */ 575, 1155, 570, 1793, 1588, 630, 1080, 632, 1077, 1075, - /* 1670 */ 1076, 1521, 1516, 1811, 318, 319, 486, 1514, 320, 489, - /* 1680 */ 1495, 577, 1494, 1493, 491, 495, 1762, 94, 576, 493, - /* 1690 */ 1734, 1811, 1238, 544, 1728, 502, 1716, 56, 141, 574, - /* 1700 */ 1714, 1715, 1713, 214, 1762, 1712, 576, 15, 503, 321, - /* 1710 */ 219, 1710, 1702, 1825, 225, 1793, 229, 146, 1794, 579, - /* 1720 */ 1796, 1797, 575, 227, 570, 508, 78, 79, 84, 517, - /* 1730 */ 41, 1825, 232, 1793, 23, 294, 1794, 579, 1796, 1797, - /* 1740 */ 575, 1248, 570, 1811, 1844, 1437, 16, 234, 338, 236, - /* 1750 */ 1419, 577, 47, 242, 238, 148, 1762, 1793, 576, 1421, - /* 1760 */ 1414, 1811, 1952, 241, 24, 243, 340, 1783, 83, 577, - /* 1770 */ 25, 1394, 252, 46, 1762, 1393, 576, 1782, 1454, 152, - /* 1780 */ 18, 1443, 1449, 1825, 1448, 1811, 332, 295, 1794, 579, - /* 1790 */ 1796, 1797, 575, 577, 570, 1453, 17, 1452, 1762, 333, - /* 1800 */ 576, 1825, 10, 1280, 1356, 295, 1794, 579, 1796, 1797, - /* 1810 */ 575, 19, 570, 1793, 45, 1331, 13, 1828, 569, 153, - /* 1820 */ 166, 1311, 580, 578, 1329, 1825, 1328, 1793, 31, 281, - /* 1830 */ 1794, 579, 1796, 1797, 575, 12, 570, 20, 582, 21, - /* 1840 */ 341, 1811, 1141, 584, 586, 1138, 587, 589, 1135, 577, - /* 1850 */ 590, 592, 595, 1129, 1762, 1811, 576, 1127, 593, 596, - /* 1860 */ 1118, 85, 1150, 577, 86, 62, 1133, 602, 1762, 1793, - /* 1870 */ 576, 263, 1132, 1146, 1072, 1041, 611, 1071, 1131, 1793, - /* 1880 */ 1070, 1825, 1068, 1066, 1130, 282, 1794, 579, 1796, 1797, - /* 1890 */ 575, 1065, 570, 1064, 
1087, 1825, 264, 1811, 620, 289, - /* 1900 */ 1794, 579, 1796, 1797, 575, 577, 570, 1811, 1062, 1061, - /* 1910 */ 1762, 1060, 576, 1059, 1058, 577, 1057, 1056, 1084, 1082, - /* 1920 */ 1762, 1793, 576, 1053, 1047, 1528, 1052, 1526, 1049, 1048, - /* 1930 */ 1046, 640, 641, 644, 645, 1524, 642, 1825, 646, 648, - /* 1940 */ 650, 291, 1794, 579, 1796, 1797, 575, 1825, 570, 1811, - /* 1950 */ 649, 283, 1794, 579, 1796, 1797, 575, 577, 570, 1522, - /* 1960 */ 653, 652, 1762, 654, 576, 1510, 656, 1003, 1492, 267, - /* 1970 */ 660, 1467, 1266, 275, 1793, 663, 664, 1467, 1467, 1467, + /* 1500 */ 1811, 379, 1720, 1719, 1201, 1694, 127, 1693, 577, 1692, + /* 1510 */ 1691, 69, 1811, 1762, 1690, 576, 1689, 1688, 1687, 1686, + /* 1520 */ 577, 395, 1685, 398, 396, 1762, 1684, 576, 1683, 1682, + /* 1530 */ 1681, 1680, 1679, 1678, 1677, 1676, 1675, 1674, 1673, 1672, + /* 1540 */ 1825, 1793, 1671, 128, 147, 1794, 579, 1796, 1797, 575, + /* 1550 */ 1670, 570, 1825, 180, 177, 1534, 94, 1794, 579, 1796, + /* 1560 */ 1797, 575, 1669, 570, 1668, 1793, 1871, 1203, 1664, 1811, + /* 1570 */ 1667, 1868, 1666, 1665, 323, 1663, 1662, 577, 1661, 1535, + /* 1580 */ 178, 1532, 1762, 1793, 576, 1500, 118, 156, 554, 1951, + /* 1590 */ 1499, 1013, 435, 1811, 1012, 437, 181, 119, 526, 1735, + /* 1600 */ 1729, 577, 1718, 188, 186, 1717, 1762, 1703, 576, 1825, + /* 1610 */ 1587, 1811, 1531, 295, 1794, 579, 1796, 1797, 575, 577, + /* 1620 */ 570, 1043, 1529, 452, 1762, 454, 576, 1527, 456, 453, + /* 1630 */ 457, 458, 1525, 1825, 461, 462, 460, 295, 1794, 579, + /* 1640 */ 1796, 1797, 575, 1523, 570, 1793, 464, 466, 1512, 1511, + /* 1650 */ 465, 1825, 1496, 1589, 1588, 290, 1794, 579, 1796, 1797, + /* 1660 */ 575, 50, 570, 1793, 1155, 197, 1154, 630, 1080, 632, + /* 1670 */ 1077, 1076, 1075, 1811, 1521, 1516, 1514, 318, 319, 486, + /* 1680 */ 320, 577, 489, 1495, 1494, 1493, 1762, 495, 576, 95, + /* 1690 */ 1734, 1811, 491, 544, 493, 1238, 1728, 56, 142, 574, + /* 1700 */ 502, 1716, 1714, 1715, 1762, 503, 576, 1713, 215, 321, + /* 1710 */ 508, 1712, 15, 1825, 226, 1793, 1710, 147, 1794, 579, + /* 1720 */ 1796, 1797, 575, 1248, 570, 1702, 517, 220, 78, 80, + /* 1730 */ 79, 1825, 232, 1793, 23, 294, 1794, 579, 1796, 1797, + /* 1740 */ 575, 85, 570, 1811, 1844, 243, 16, 229, 338, 41, + /* 1750 */ 1783, 577, 1437, 234, 17, 238, 1762, 1793, 576, 252, + /* 1760 */ 236, 1811, 1952, 1419, 47, 242, 340, 1421, 149, 577, + /* 1770 */ 241, 24, 25, 46, 1762, 1449, 576, 1414, 84, 1782, + /* 1780 */ 153, 18, 1394, 1825, 1393, 1811, 45, 295, 1794, 579, + /* 1790 */ 1796, 1797, 575, 577, 570, 1454, 1448, 13, 1762, 1443, + /* 1800 */ 576, 1825, 332, 1453, 1452, 295, 1794, 579, 1796, 1797, + /* 1810 */ 575, 333, 570, 1793, 10, 1280, 19, 1828, 154, 1311, + /* 1820 */ 569, 1356, 582, 167, 31, 1825, 1331, 1793, 12, 281, + /* 1830 */ 1794, 579, 1796, 1797, 575, 1329, 570, 1328, 20, 21, + /* 1840 */ 341, 1811, 580, 1141, 584, 587, 578, 1138, 586, 577, + /* 1850 */ 1135, 589, 590, 592, 1762, 1811, 576, 595, 1118, 1133, + /* 1860 */ 1129, 1127, 1150, 577, 593, 1132, 602, 596, 1762, 1793, + /* 1870 */ 576, 1131, 1130, 263, 86, 87, 1146, 62, 611, 1793, + /* 1880 */ 1072, 1825, 1041, 1071, 1070, 282, 1794, 579, 1796, 1797, + /* 1890 */ 575, 1068, 570, 1066, 1065, 1825, 1064, 1811, 620, 289, + /* 1900 */ 1794, 579, 1796, 1797, 575, 577, 570, 1811, 1087, 264, + /* 1910 */ 1762, 1062, 576, 1061, 1060, 577, 1059, 1058, 1057, 1056, + /* 1920 */ 1762, 1793, 576, 1084, 1082, 1047, 1053, 1528, 1052, 1049, + /* 1930 */ 1526, 1048, 1046, 640, 641, 644, 1524, 1825, 
642, 648, + /* 1940 */ 646, 291, 1794, 579, 1796, 1797, 575, 1825, 570, 1811, + /* 1950 */ 650, 283, 1794, 579, 1796, 1797, 575, 577, 570, 645, + /* 1960 */ 649, 1522, 1762, 652, 576, 653, 1510, 656, 654, 1003, + /* 1970 */ 1492, 267, 660, 663, 1793, 1266, 275, 664, 1467, 1467, /* 1980 */ 1467, 1467, 1467, 1467, 1467, 1467, 1467, 1467, 1467, 1825, /* 1990 */ 1467, 1467, 1793, 292, 1794, 579, 1796, 1797, 575, 1467, /* 2000 */ 570, 1467, 1811, 1467, 1467, 1467, 1467, 1467, 1467, 1467, @@ -568,96 +568,96 @@ static const YYCODETYPE yy_lookahead[] = { /* 1050 */ 90, 375, 8, 9, 90, 68, 12, 13, 14, 15, /* 1060 */ 16, 8, 9, 296, 43, 12, 13, 14, 15, 16, /* 1070 */ 288, 268, 125, 126, 327, 328, 172, 277, 296, 162, - /* 1080 */ 163, 164, 279, 301, 167, 303, 339, 90, 43, 46, - /* 1090 */ 13, 369, 90, 35, 89, 269, 90, 228, 111, 296, + /* 1080 */ 163, 164, 279, 301, 167, 303, 339, 90, 269, 46, + /* 1090 */ 13, 369, 90, 35, 89, 365, 90, 228, 111, 296, /* 1100 */ 183, 324, 43, 186, 99, 188, 189, 190, 191, 192, /* 1110 */ 328, 90, 35, 43, 332, 333, 334, 335, 336, 337, /* 1120 */ 268, 339, 193, 268, 342, 268, 268, 268, 346, 347, - /* 1130 */ 348, 279, 89, 35, 279, 90, 279, 279, 279, 13, - /* 1140 */ 43, 260, 360, 90, 227, 365, 269, 267, 296, 90, + /* 1130 */ 348, 279, 89, 35, 279, 268, 279, 279, 279, 13, + /* 1140 */ 43, 260, 360, 90, 227, 269, 279, 267, 296, 90, /* 1150 */ 368, 296, 43, 296, 296, 296, 288, 170, 43, 172, - /* 1160 */ 90, 35, 300, 43, 331, 43, 68, 356, 349, 288, - /* 1170 */ 372, 372, 359, 372, 43, 229, 326, 296, 325, 47, - /* 1180 */ 193, 194, 301, 260, 303, 248, 168, 90, 42, 319, - /* 1190 */ 308, 20, 205, 206, 207, 208, 209, 210, 211, 90, - /* 1200 */ 268, 157, 268, 308, 152, 90, 306, 306, 246, 328, - /* 1210 */ 90, 288, 90, 332, 333, 334, 335, 336, 337, 296, - /* 1220 */ 339, 90, 268, 342, 301, 268, 303, 346, 347, 348, - /* 1230 */ 172, 20, 262, 268, 262, 20, 260, 20, 323, 272, - /* 1240 */ 272, 303, 316, 20, 316, 318, 272, 272, 20, 368, - /* 1250 */ 309, 328, 272, 260, 272, 332, 333, 334, 335, 336, - /* 1260 */ 337, 272, 339, 268, 288, 342, 262, 301, 175, 346, - /* 1270 */ 347, 348, 296, 288, 288, 262, 288, 301, 288, 303, - /* 1280 */ 357, 288, 288, 268, 288, 268, 288, 288, 288, 296, - /* 1290 */ 270, 288, 323, 322, 301, 288, 303, 270, 268, 316, - /* 1300 */ 234, 270, 301, 312, 328, 154, 296, 284, 332, 333, - /* 1310 */ 334, 335, 336, 337, 20, 339, 260, 303, 342, 310, - /* 1320 */ 301, 328, 346, 347, 348, 332, 333, 334, 335, 336, - /* 1330 */ 337, 270, 339, 357, 301, 342, 301, 312, 301, 346, - /* 1340 */ 347, 348, 270, 309, 288, 296, 235, 331, 301, 301, - /* 1350 */ 357, 312, 296, 312, 241, 301, 364, 301, 301, 303, - /* 1360 */ 161, 301, 243, 242, 230, 226, 260, 296, 326, 20, - /* 1370 */ 330, 247, 364, 317, 245, 250, 260, 89, 89, 367, - /* 1380 */ 292, 301, 278, 36, 328, 268, 263, 363, 332, 333, - /* 1390 */ 334, 335, 336, 337, 288, 339, 320, 270, 366, 361, - /* 1400 */ 364, 373, 296, 374, 288, 379, 262, 301, 362, 303, - /* 1410 */ 345, 315, 296, 271, 358, 282, 0, 301, 260, 303, - /* 1420 */ 282, 258, 282, 317, 374, 374, 0, 371, 373, 177, - /* 1430 */ 0, 375, 0, 42, 328, 373, 0, 35, 332, 333, - /* 1440 */ 334, 335, 336, 337, 328, 339, 288, 187, 332, 333, - /* 1450 */ 334, 335, 336, 337, 296, 339, 35, 35, 342, 301, - /* 1460 */ 35, 303, 346, 347, 358, 187, 0, 187, 35, 35, - /* 1470 */ 0, 187, 260, 35, 0, 22, 0, 371, 0, 35, - /* 1480 */ 172, 375, 170, 0, 260, 0, 328, 166, 0, 165, + /* 1160 */ 90, 35, 300, 296, 331, 43, 68, 356, 372, 288, + /* 1170 */ 349, 372, 359, 372, 43, 43, 326, 296, 
43, 229, + /* 1180 */ 193, 194, 301, 260, 303, 248, 47, 90, 325, 319, + /* 1190 */ 42, 168, 205, 206, 207, 208, 209, 210, 211, 90, + /* 1200 */ 308, 157, 20, 268, 268, 90, 152, 308, 246, 328, + /* 1210 */ 268, 288, 90, 332, 333, 334, 335, 336, 337, 296, + /* 1220 */ 339, 90, 90, 342, 301, 90, 303, 346, 347, 348, + /* 1230 */ 172, 306, 306, 268, 268, 20, 260, 262, 20, 262, + /* 1240 */ 272, 303, 323, 272, 316, 20, 20, 316, 318, 368, + /* 1250 */ 272, 328, 272, 260, 20, 332, 333, 334, 335, 336, + /* 1260 */ 337, 309, 339, 272, 288, 342, 272, 268, 272, 346, + /* 1270 */ 347, 348, 296, 288, 262, 262, 288, 301, 288, 303, + /* 1280 */ 357, 288, 268, 288, 301, 270, 288, 175, 288, 296, + /* 1290 */ 323, 288, 288, 268, 301, 288, 303, 288, 288, 270, + /* 1300 */ 268, 270, 316, 322, 328, 234, 154, 20, 332, 333, + /* 1310 */ 334, 335, 336, 337, 296, 339, 260, 270, 342, 303, + /* 1320 */ 312, 328, 346, 347, 348, 332, 333, 334, 335, 336, + /* 1330 */ 337, 301, 339, 357, 301, 342, 301, 301, 312, 346, + /* 1340 */ 347, 348, 301, 309, 288, 284, 270, 310, 331, 301, + /* 1350 */ 357, 312, 296, 235, 312, 241, 364, 301, 301, 303, + /* 1360 */ 161, 301, 243, 301, 301, 364, 260, 242, 230, 226, + /* 1370 */ 296, 326, 20, 317, 247, 245, 260, 89, 250, 330, + /* 1380 */ 89, 292, 301, 363, 328, 361, 367, 270, 332, 333, + /* 1390 */ 334, 335, 336, 337, 288, 339, 36, 268, 278, 263, + /* 1400 */ 362, 374, 296, 262, 288, 320, 366, 301, 315, 303, + /* 1410 */ 364, 345, 296, 271, 358, 282, 282, 301, 260, 303, + /* 1420 */ 258, 0, 282, 317, 373, 0, 374, 371, 373, 379, + /* 1430 */ 177, 375, 0, 0, 328, 42, 0, 373, 332, 333, + /* 1440 */ 334, 335, 336, 337, 328, 339, 288, 374, 332, 333, + /* 1450 */ 334, 335, 336, 337, 296, 339, 187, 35, 342, 301, + /* 1460 */ 187, 303, 346, 347, 358, 35, 35, 0, 0, 35, + /* 1470 */ 35, 187, 260, 35, 0, 35, 187, 371, 0, 22, + /* 1480 */ 0, 375, 35, 172, 260, 170, 328, 0, 0, 165, /* 1490 */ 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, - /* 1500 */ 288, 0, 46, 0, 42, 0, 0, 0, 296, 149, - /* 1510 */ 0, 0, 288, 301, 0, 303, 0, 0, 144, 35, - /* 1520 */ 296, 0, 144, 0, 0, 301, 0, 303, 0, 0, + /* 1500 */ 288, 166, 0, 0, 46, 0, 42, 0, 296, 0, + /* 1510 */ 0, 149, 288, 301, 0, 303, 0, 0, 0, 0, + /* 1520 */ 296, 144, 0, 144, 35, 301, 0, 303, 0, 0, /* 1530 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* 1540 */ 328, 260, 42, 0, 332, 333, 334, 335, 336, 337, - /* 1550 */ 0, 339, 328, 40, 56, 0, 332, 333, 334, 335, + /* 1540 */ 328, 260, 0, 42, 332, 333, 334, 335, 336, 337, + /* 1550 */ 0, 339, 328, 42, 56, 0, 332, 333, 334, 335, /* 1560 */ 336, 337, 0, 339, 0, 260, 342, 22, 0, 288, - /* 1570 */ 0, 347, 0, 0, 293, 0, 0, 296, 56, 0, - /* 1580 */ 0, 39, 301, 260, 303, 14, 42, 46, 376, 377, - /* 1590 */ 43, 14, 46, 288, 0, 39, 0, 0, 293, 0, - /* 1600 */ 39, 296, 161, 0, 0, 0, 301, 0, 303, 328, - /* 1610 */ 62, 288, 0, 332, 333, 334, 335, 336, 337, 296, - /* 1620 */ 339, 35, 39, 0, 301, 47, 303, 35, 47, 39, - /* 1630 */ 0, 47, 39, 328, 0, 35, 35, 332, 333, 334, - /* 1640 */ 335, 336, 337, 0, 339, 260, 0, 39, 0, 47, - /* 1650 */ 0, 328, 98, 96, 22, 332, 333, 334, 335, 336, - /* 1660 */ 337, 35, 339, 260, 0, 43, 35, 43, 35, 22, - /* 1670 */ 35, 0, 0, 288, 22, 22, 49, 0, 22, 35, - /* 1680 */ 0, 296, 0, 0, 35, 22, 301, 20, 303, 35, - /* 1690 */ 0, 288, 35, 370, 0, 22, 0, 157, 173, 296, - /* 1700 */ 0, 0, 0, 154, 301, 0, 303, 89, 157, 157, - /* 1710 */ 90, 0, 0, 328, 89, 260, 153, 332, 333, 334, - /* 1720 */ 335, 336, 337, 39, 339, 159, 89, 89, 99, 155, - /* 1730 */ 43, 328, 46, 260, 89, 332, 333, 334, 335, 336, - /* 
1740 */ 337, 182, 339, 288, 341, 90, 231, 89, 293, 90, - /* 1750 */ 90, 296, 43, 43, 89, 89, 301, 260, 303, 90, - /* 1760 */ 90, 288, 377, 89, 89, 46, 293, 46, 89, 296, - /* 1770 */ 43, 90, 46, 43, 301, 90, 303, 46, 90, 46, - /* 1780 */ 43, 90, 35, 328, 35, 288, 35, 332, 333, 334, - /* 1790 */ 335, 336, 337, 296, 339, 35, 231, 35, 301, 35, - /* 1800 */ 303, 328, 2, 22, 193, 332, 333, 334, 335, 336, - /* 1810 */ 337, 43, 339, 260, 225, 90, 231, 89, 89, 46, - /* 1820 */ 46, 22, 100, 195, 90, 328, 90, 260, 89, 332, - /* 1830 */ 333, 334, 335, 336, 337, 89, 339, 89, 35, 89, - /* 1840 */ 35, 288, 90, 89, 35, 90, 89, 35, 90, 296, - /* 1850 */ 89, 35, 35, 90, 301, 288, 303, 90, 89, 89, - /* 1860 */ 22, 89, 35, 296, 89, 89, 113, 101, 301, 260, - /* 1870 */ 303, 43, 113, 22, 35, 62, 61, 35, 113, 260, - /* 1880 */ 35, 328, 35, 35, 113, 332, 333, 334, 335, 336, - /* 1890 */ 337, 35, 339, 35, 68, 328, 43, 288, 87, 332, - /* 1900 */ 333, 334, 335, 336, 337, 296, 339, 288, 35, 35, - /* 1910 */ 301, 22, 303, 35, 22, 296, 35, 35, 68, 35, - /* 1920 */ 301, 260, 303, 35, 22, 0, 35, 0, 35, 35, - /* 1930 */ 35, 35, 47, 35, 47, 0, 39, 328, 39, 35, + /* 1570 */ 0, 347, 0, 0, 293, 0, 0, 296, 0, 0, + /* 1580 */ 56, 0, 301, 260, 303, 0, 39, 43, 376, 377, + /* 1590 */ 0, 14, 46, 288, 14, 46, 40, 39, 293, 0, + /* 1600 */ 0, 296, 0, 161, 39, 0, 301, 0, 303, 328, + /* 1610 */ 0, 288, 0, 332, 333, 334, 335, 336, 337, 296, + /* 1620 */ 339, 62, 0, 35, 301, 39, 303, 0, 35, 47, + /* 1630 */ 47, 39, 0, 328, 47, 39, 35, 332, 333, 334, + /* 1640 */ 335, 336, 337, 0, 339, 260, 35, 39, 0, 0, + /* 1650 */ 47, 328, 0, 0, 0, 332, 333, 334, 335, 336, + /* 1660 */ 337, 98, 339, 260, 35, 96, 22, 43, 35, 43, + /* 1670 */ 35, 35, 22, 288, 0, 0, 0, 22, 22, 49, + /* 1680 */ 22, 296, 35, 0, 0, 0, 301, 22, 303, 20, + /* 1690 */ 0, 288, 35, 370, 35, 35, 0, 157, 173, 296, + /* 1700 */ 22, 0, 0, 0, 301, 157, 303, 0, 154, 157, + /* 1710 */ 159, 0, 89, 328, 89, 260, 0, 332, 333, 334, + /* 1720 */ 335, 336, 337, 182, 339, 0, 155, 90, 89, 89, + /* 1730 */ 39, 328, 46, 260, 89, 332, 333, 334, 335, 336, + /* 1740 */ 337, 99, 339, 288, 341, 46, 231, 153, 293, 43, + /* 1750 */ 46, 296, 90, 89, 231, 89, 301, 260, 303, 46, + /* 1760 */ 90, 288, 377, 90, 43, 43, 293, 90, 89, 296, + /* 1770 */ 89, 89, 43, 43, 301, 35, 303, 90, 89, 46, + /* 1780 */ 46, 43, 90, 328, 90, 288, 225, 332, 333, 334, + /* 1790 */ 335, 336, 337, 296, 339, 90, 35, 231, 301, 90, + /* 1800 */ 303, 328, 35, 35, 35, 332, 333, 334, 335, 336, + /* 1810 */ 337, 35, 339, 260, 2, 22, 43, 89, 46, 22, + /* 1820 */ 89, 193, 35, 46, 89, 328, 90, 260, 89, 332, + /* 1830 */ 333, 334, 335, 336, 337, 90, 339, 90, 89, 89, + /* 1840 */ 35, 288, 100, 90, 89, 89, 195, 90, 35, 296, + /* 1850 */ 90, 35, 89, 35, 301, 288, 303, 35, 22, 113, + /* 1860 */ 90, 90, 35, 296, 89, 113, 101, 89, 301, 260, + /* 1870 */ 303, 113, 113, 43, 89, 89, 22, 89, 61, 260, + /* 1880 */ 35, 328, 62, 35, 35, 332, 333, 334, 335, 336, + /* 1890 */ 337, 35, 339, 35, 35, 328, 35, 288, 87, 332, + /* 1900 */ 333, 334, 335, 336, 337, 296, 339, 288, 68, 43, + /* 1910 */ 301, 35, 303, 35, 22, 296, 35, 22, 35, 35, + /* 1920 */ 301, 260, 303, 68, 35, 22, 35, 0, 35, 35, + /* 1930 */ 0, 35, 35, 35, 47, 35, 0, 328, 39, 35, /* 1940 */ 39, 332, 333, 334, 335, 336, 337, 328, 339, 288, - /* 1950 */ 47, 332, 333, 334, 335, 336, 337, 296, 339, 0, - /* 1960 */ 47, 35, 301, 39, 303, 0, 35, 35, 0, 22, - /* 1970 */ 21, 380, 22, 22, 260, 21, 20, 380, 380, 380, + /* 1950 */ 39, 332, 333, 334, 335, 336, 337, 296, 339, 47, + /* 1960 */ 47, 0, 301, 35, 
303, 47, 0, 35, 39, 35, + /* 1970 */ 0, 22, 21, 21, 260, 22, 22, 20, 380, 380, /* 1980 */ 380, 380, 380, 380, 380, 380, 380, 380, 380, 328, /* 1990 */ 380, 380, 260, 332, 333, 334, 335, 336, 337, 380, /* 2000 */ 339, 380, 288, 380, 380, 380, 380, 380, 380, 380, @@ -703,7 +703,7 @@ static const YYCODETYPE yy_lookahead[] = { }; #define YY_SHIFT_COUNT (665) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (1968) +#define YY_SHIFT_MAX (1970) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 917, 0, 0, 62, 62, 264, 264, 264, 326, 326, /* 10 */ 264, 264, 391, 593, 720, 593, 593, 593, 593, 593, @@ -712,66 +712,66 @@ static const unsigned short int yy_shift_ofst[] = { /* 40 */ 593, 593, 313, 313, 199, 199, 199, 987, 987, 354, /* 50 */ 987, 987, 165, 341, 254, 258, 254, 79, 79, 36, /* 60 */ 36, 97, 18, 254, 254, 79, 79, 79, 79, 79, - /* 70 */ 79, 79, 79, 79, 82, 79, 79, 79, 170, 205, - /* 80 */ 79, 79, 205, 405, 79, 205, 205, 205, 79, 158, - /* 90 */ 719, 662, 683, 683, 108, 371, 371, 371, 371, 371, + /* 70 */ 79, 79, 79, 79, 82, 79, 79, 79, 170, 79, + /* 80 */ 205, 79, 79, 205, 405, 79, 205, 205, 205, 79, + /* 90 */ 158, 719, 662, 683, 683, 108, 371, 371, 371, 371, /* 100 */ 371, 371, 371, 371, 371, 371, 371, 371, 371, 371, - /* 110 */ 371, 371, 371, 371, 134, 419, 18, 636, 636, 488, - /* 120 */ 551, 562, 90, 90, 90, 551, 526, 526, 170, 16, - /* 130 */ 16, 205, 205, 323, 323, 483, 498, 198, 198, 198, - /* 140 */ 198, 198, 198, 198, 331, 21, 167, 559, 43, 50, - /* 150 */ 112, 168, 99, 421, 19, 530, 771, 755, 717, 603, - /* 160 */ 717, 918, 501, 501, 501, 869, 822, 946, 1132, 1018, - /* 170 */ 1146, 1171, 1171, 1146, 1052, 1052, 1171, 1171, 1171, 1211, - /* 180 */ 1211, 1215, 82, 170, 82, 1217, 1223, 82, 1217, 82, - /* 190 */ 1228, 82, 82, 1171, 82, 1211, 205, 205, 205, 205, - /* 200 */ 205, 205, 205, 205, 205, 205, 205, 1171, 1211, 323, - /* 210 */ 1215, 158, 1093, 170, 158, 1171, 1171, 1217, 158, 1066, - /* 220 */ 323, 323, 323, 323, 1066, 323, 1151, 526, 1228, 158, - /* 230 */ 483, 158, 526, 1294, 323, 1111, 1066, 323, 323, 1111, - /* 240 */ 1066, 323, 323, 205, 1113, 1199, 1111, 1119, 1121, 1134, - /* 250 */ 946, 1139, 526, 1349, 1124, 1129, 1125, 1124, 1129, 1124, - /* 260 */ 1129, 1288, 1289, 323, 498, 1171, 158, 1347, 1211, 2395, + /* 110 */ 371, 371, 371, 371, 371, 134, 419, 18, 636, 636, + /* 120 */ 488, 551, 562, 90, 90, 90, 551, 526, 526, 170, + /* 130 */ 16, 16, 205, 205, 323, 323, 483, 498, 198, 198, + /* 140 */ 198, 198, 198, 198, 198, 331, 21, 167, 559, 43, + /* 150 */ 50, 112, 168, 99, 421, 19, 530, 771, 755, 717, + /* 160 */ 603, 717, 918, 501, 501, 501, 869, 822, 950, 1139, + /* 170 */ 1023, 1148, 1182, 1182, 1148, 1054, 1054, 1182, 1182, 1182, + /* 180 */ 1215, 1215, 1218, 82, 170, 82, 1225, 1226, 82, 1225, + /* 190 */ 82, 1234, 82, 82, 1182, 82, 1215, 205, 205, 205, + /* 200 */ 205, 205, 205, 205, 205, 205, 205, 205, 1182, 1215, + /* 210 */ 323, 1218, 158, 1112, 170, 158, 1182, 1182, 1225, 158, + /* 220 */ 1071, 323, 323, 323, 323, 1071, 323, 1152, 1234, 158, + /* 230 */ 483, 158, 526, 1287, 323, 1118, 1071, 323, 323, 1118, + /* 240 */ 1071, 323, 323, 205, 1114, 1199, 1118, 1119, 1125, 1138, + /* 250 */ 950, 1143, 526, 1352, 1127, 1130, 1128, 1127, 1130, 1127, + /* 260 */ 1130, 1288, 1291, 323, 498, 1182, 158, 1360, 1215, 2395, /* 270 */ 2395, 2395, 2395, 2395, 2395, 2395, 83, 456, 214, 307, /* 280 */ 208, 564, 693, 808, 824, 1044, 1053, 513, 542, 542, /* 290 */ 542, 542, 542, 542, 542, 542, 545, 129, 13, 13, /* 300 */ 236, 594, 430, 581, 387, 172, 452, 511, 106, 106, /* 
310 */ 106, 106, 914, 963, 875, 898, 943, 945, 973, 979, - /* 320 */ 1011, 423, 860, 960, 964, 997, 1002, 1021, 1045, 1059, - /* 330 */ 1022, 947, 962, 937, 1006, 904, 1058, 929, 1070, 1043, - /* 340 */ 1097, 1109, 1115, 1120, 1122, 1131, 1005, 1077, 1126, 1098, - /* 350 */ 764, 1416, 1426, 1252, 1430, 1432, 1391, 1436, 1402, 1260, - /* 360 */ 1421, 1422, 1425, 1278, 1466, 1433, 1434, 1280, 1470, 1284, - /* 370 */ 1478, 1438, 1474, 1453, 1476, 1444, 1308, 1312, 1483, 1485, - /* 380 */ 1321, 1324, 1488, 1501, 1456, 1503, 1462, 1505, 1506, 1507, - /* 390 */ 1360, 1510, 1511, 1514, 1516, 1517, 1374, 1484, 1521, 1378, - /* 400 */ 1523, 1524, 1526, 1528, 1529, 1530, 1531, 1532, 1533, 1534, - /* 410 */ 1535, 1536, 1537, 1538, 1500, 1539, 1543, 1550, 1562, 1564, - /* 420 */ 1570, 1545, 1568, 1572, 1573, 1575, 1576, 1498, 1555, 1522, - /* 430 */ 1579, 1580, 1544, 1542, 1547, 1571, 1541, 1577, 1546, 1594, - /* 440 */ 1513, 1556, 1596, 1597, 1599, 1561, 1441, 1603, 1604, 1605, - /* 450 */ 1548, 1607, 1612, 1586, 1578, 1583, 1623, 1592, 1581, 1590, - /* 460 */ 1630, 1600, 1584, 1593, 1634, 1601, 1602, 1608, 1643, 1646, - /* 470 */ 1648, 1650, 1554, 1557, 1626, 1632, 1664, 1631, 1622, 1624, - /* 480 */ 1633, 1635, 1647, 1671, 1652, 1672, 1653, 1627, 1677, 1656, - /* 490 */ 1644, 1680, 1649, 1682, 1654, 1683, 1663, 1667, 1690, 1540, - /* 500 */ 1657, 1694, 1525, 1673, 1551, 1549, 1696, 1700, 1552, 1566, - /* 510 */ 1701, 1702, 1705, 1618, 1620, 1559, 1711, 1625, 1574, 1637, - /* 520 */ 1712, 1684, 1563, 1638, 1629, 1686, 1687, 1515, 1645, 1655, - /* 530 */ 1658, 1659, 1660, 1665, 1709, 1669, 1666, 1674, 1675, 1670, - /* 540 */ 1710, 1719, 1721, 1679, 1727, 1565, 1681, 1685, 1726, 1589, - /* 550 */ 1730, 1731, 1733, 1688, 1737, 1585, 1691, 1747, 1749, 1751, - /* 560 */ 1760, 1762, 1764, 1691, 1800, 1781, 1611, 1768, 1728, 1725, - /* 570 */ 1729, 1734, 1739, 1736, 1773, 1746, 1748, 1774, 1799, 1628, - /* 580 */ 1750, 1722, 1752, 1803, 1805, 1754, 1755, 1809, 1757, 1758, - /* 590 */ 1812, 1761, 1763, 1816, 1769, 1767, 1817, 1770, 1753, 1759, - /* 600 */ 1765, 1771, 1838, 1766, 1772, 1775, 1827, 1776, 1828, 1828, - /* 610 */ 1851, 1813, 1815, 1839, 1842, 1845, 1847, 1848, 1856, 1858, - /* 620 */ 1826, 1811, 1853, 1873, 1874, 1889, 1878, 1892, 1881, 1882, - /* 630 */ 1850, 1622, 1884, 1624, 1888, 1891, 1893, 1894, 1902, 1895, - /* 640 */ 1925, 1896, 1885, 1897, 1927, 1898, 1887, 1899, 1935, 1904, - /* 650 */ 1903, 1901, 1959, 1926, 1913, 1924, 1965, 1931, 1932, 1968, - /* 660 */ 1947, 1949, 1950, 1951, 1954, 1956, + /* 320 */ 1011, 423, 860, 960, 964, 997, 1002, 1021, 1059, 1070, + /* 330 */ 1022, 947, 962, 937, 1006, 904, 1058, 929, 1097, 1043, + /* 340 */ 1109, 1115, 1122, 1131, 1132, 1135, 1005, 1077, 1126, 1098, + /* 350 */ 764, 1421, 1425, 1253, 1432, 1433, 1393, 1436, 1422, 1269, + /* 360 */ 1430, 1431, 1434, 1273, 1467, 1435, 1438, 1284, 1468, 1289, + /* 370 */ 1474, 1440, 1478, 1457, 1480, 1447, 1311, 1315, 1487, 1488, + /* 380 */ 1335, 1324, 1502, 1503, 1458, 1505, 1464, 1507, 1509, 1510, + /* 390 */ 1362, 1514, 1516, 1517, 1518, 1519, 1377, 1489, 1522, 1379, + /* 400 */ 1526, 1528, 1529, 1530, 1531, 1532, 1533, 1534, 1535, 1536, + /* 410 */ 1537, 1538, 1539, 1542, 1501, 1550, 1562, 1564, 1570, 1572, + /* 420 */ 1573, 1545, 1568, 1575, 1576, 1578, 1579, 1498, 1555, 1524, + /* 430 */ 1581, 1585, 1511, 1547, 1544, 1577, 1546, 1580, 1549, 1590, + /* 440 */ 1556, 1558, 1599, 1600, 1602, 1565, 1442, 1605, 1607, 1610, + /* 450 */ 1559, 1612, 1622, 1588, 1582, 1586, 1627, 1593, 1583, 1592, + /* 460 */ 
1632, 1601, 1587, 1596, 1643, 1611, 1603, 1608, 1648, 1649, + /* 470 */ 1652, 1653, 1563, 1569, 1629, 1644, 1654, 1633, 1624, 1626, + /* 480 */ 1635, 1636, 1650, 1674, 1655, 1675, 1656, 1630, 1676, 1658, + /* 490 */ 1647, 1683, 1657, 1684, 1659, 1685, 1665, 1669, 1690, 1540, + /* 500 */ 1660, 1696, 1525, 1678, 1548, 1554, 1701, 1702, 1552, 1551, + /* 510 */ 1703, 1707, 1711, 1623, 1637, 1541, 1716, 1625, 1571, 1639, + /* 520 */ 1725, 1691, 1594, 1640, 1642, 1686, 1706, 1515, 1645, 1662, + /* 530 */ 1664, 1670, 1673, 1666, 1721, 1677, 1679, 1681, 1682, 1687, + /* 540 */ 1722, 1699, 1704, 1689, 1729, 1523, 1692, 1694, 1713, 1561, + /* 550 */ 1730, 1733, 1734, 1705, 1738, 1566, 1709, 1740, 1761, 1767, + /* 560 */ 1768, 1769, 1776, 1709, 1812, 1793, 1628, 1773, 1728, 1736, + /* 570 */ 1731, 1745, 1735, 1747, 1772, 1739, 1749, 1777, 1797, 1651, + /* 580 */ 1750, 1742, 1753, 1787, 1805, 1755, 1757, 1813, 1756, 1760, + /* 590 */ 1816, 1763, 1770, 1818, 1775, 1771, 1822, 1778, 1746, 1752, + /* 600 */ 1758, 1759, 1836, 1765, 1785, 1786, 1827, 1788, 1830, 1830, + /* 610 */ 1854, 1820, 1817, 1845, 1848, 1849, 1856, 1858, 1859, 1861, + /* 620 */ 1840, 1811, 1866, 1876, 1878, 1892, 1881, 1895, 1883, 1884, + /* 630 */ 1855, 1624, 1889, 1626, 1891, 1893, 1894, 1896, 1903, 1897, + /* 640 */ 1927, 1898, 1887, 1899, 1930, 1900, 1912, 1901, 1936, 1904, + /* 650 */ 1913, 1911, 1961, 1928, 1918, 1929, 1966, 1932, 1934, 1970, + /* 660 */ 1949, 1951, 1953, 1954, 1952, 1957, }; #define YY_REDUCE_COUNT (275) #define YY_REDUCE_MIN (-357) @@ -784,27 +784,27 @@ static const short yy_reduce_ofst[] = { /* 40 */ 2045, 2055, 376, 676, -259, 76, 142, 117, 687, -180, /* 50 */ -284, 747, -30, 250, 342, 345, 398, 359, 462, -264, /* 60 */ -256, -357, -240, -339, -244, 81, 385, 475, 666, 668, - /* 70 */ 669, 673, 702, 734, 148, 767, 803, 852, -280, -273, - /* 80 */ 855, 857, 211, 31, 858, 272, 523, 363, 859, -205, - /* 90 */ -20, -159, -159, -159, -171, -86, 95, 139, 149, 388, - /* 100 */ 413, 420, 466, 476, 512, 514, 524, 548, 570, 574, - /* 110 */ 588, 608, 614, 648, -233, -213, -298, -76, 53, -215, - /* 120 */ 174, 279, 408, 431, 557, 217, 35, 275, 317, 409, - /* 130 */ 467, 91, 540, 590, 595, 675, 565, 328, 340, 368, - /* 140 */ 461, 468, 606, 661, 373, 705, 654, 800, 722, 826, - /* 150 */ 777, 780, 868, 868, 877, 880, 862, 833, 811, 811, - /* 160 */ 811, 819, 798, 799, 801, 813, 868, 850, 853, 870, - /* 170 */ 882, 932, 934, 895, 900, 901, 954, 957, 965, 970, - /* 180 */ 972, 915, 967, 938, 968, 926, 927, 974, 928, 975, - /* 190 */ 941, 980, 982, 995, 989, 1004, 985, 986, 988, 990, - /* 200 */ 994, 996, 998, 999, 1000, 1003, 1007, 1015, 1013, 966, - /* 210 */ 969, 1020, 971, 1014, 1027, 1017, 1030, 983, 1031, 991, - /* 220 */ 1001, 1019, 1033, 1035, 1025, 1037, 1009, 1010, 1034, 1061, - /* 230 */ 1023, 1072, 1049, 1016, 1047, 992, 1039, 1048, 1054, 1008, - /* 240 */ 1041, 1057, 1060, 868, 1012, 1032, 1036, 1024, 1046, 1038, - /* 250 */ 1042, 811, 1071, 1040, 1029, 1028, 1026, 1050, 1055, 1051, - /* 260 */ 1062, 1065, 1088, 1080, 1104, 1117, 1127, 1123, 1144, 1076, - /* 270 */ 1096, 1133, 1138, 1140, 1142, 1163, + /* 70 */ 669, 673, 702, 734, 148, 767, 803, 852, -280, 855, + /* 80 */ -273, 857, 858, 211, 31, 859, 272, 523, 363, 867, + /* 90 */ -205, -20, -159, -159, -159, -171, -86, 95, 139, 149, + /* 100 */ 388, 413, 420, 466, 476, 512, 514, 524, 548, 570, + /* 110 */ 574, 588, 608, 614, 648, -233, -213, -298, -76, 53, + /* 120 */ -215, 174, 279, 408, 431, 557, 217, 35, 275, 317, + /* 130 */ 409, 467, 91, 540, 590, 595, 
675, 565, 328, 340, + /* 140 */ 368, 461, 468, 606, 661, 373, 705, 654, 800, 722, + /* 150 */ 819, 777, 730, 868, 868, 876, 880, 862, 833, 811, + /* 160 */ 811, 811, 821, 796, 799, 801, 813, 868, 850, 863, + /* 170 */ 870, 892, 935, 936, 899, 925, 926, 942, 965, 966, + /* 180 */ 975, 977, 919, 968, 938, 971, 928, 930, 978, 931, + /* 190 */ 980, 952, 991, 994, 999, 996, 1012, 985, 988, 990, + /* 200 */ 995, 998, 1000, 1003, 1004, 1007, 1009, 1010, 1014, 1013, + /* 210 */ 983, 967, 1015, 981, 1016, 1029, 1025, 1032, 986, 1031, + /* 220 */ 1008, 1030, 1033, 1035, 1036, 1026, 1041, 1037, 1034, 1047, + /* 230 */ 1061, 1076, 1018, 1017, 1048, 992, 1039, 1057, 1060, 1001, + /* 240 */ 1042, 1062, 1063, 868, 1019, 1040, 1046, 1020, 1038, 1024, + /* 250 */ 1045, 811, 1074, 1049, 1027, 1051, 1050, 1052, 1055, 1073, + /* 260 */ 1064, 1066, 1089, 1081, 1120, 1129, 1117, 1136, 1141, 1085, + /* 270 */ 1093, 1133, 1134, 1140, 1142, 1162, }; static const YYACTIONTYPE yy_default[] = { /* 0 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, @@ -815,21 +815,21 @@ static const YYACTIONTYPE yy_default[] = { /* 50 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, /* 60 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, /* 70 */ 1465, 1465, 1465, 1465, 1539, 1465, 1465, 1465, 1465, 1465, - /* 80 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1537, - /* 90 */ 1695, 1465, 1873, 1465, 1465, 1465, 1465, 1465, 1465, 1465, + /* 80 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, + /* 90 */ 1537, 1695, 1465, 1873, 1465, 1465, 1465, 1465, 1465, 1465, /* 100 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, - /* 110 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1539, - /* 120 */ 1465, 1537, 1885, 1885, 1885, 1465, 1465, 1465, 1465, 1738, - /* 130 */ 1738, 1465, 1465, 1465, 1465, 1637, 1465, 1465, 1465, 1465, - /* 140 */ 1465, 1465, 1465, 1465, 1730, 1465, 1954, 1465, 1465, 1465, - /* 150 */ 1736, 1908, 1465, 1465, 1465, 1465, 1590, 1900, 1877, 1891, - /* 160 */ 1878, 1875, 1939, 1939, 1939, 1894, 1465, 1904, 1465, 1723, - /* 170 */ 1700, 1465, 1465, 1700, 1697, 1697, 1465, 1465, 1465, 1465, - /* 180 */ 1465, 1465, 1539, 1465, 1539, 1465, 1465, 1539, 1465, 1539, - /* 190 */ 1465, 1539, 1539, 1465, 1539, 1465, 1465, 1465, 1465, 1465, + /* 110 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, + /* 120 */ 1539, 1465, 1537, 1885, 1885, 1885, 1465, 1465, 1465, 1465, + /* 130 */ 1738, 1738, 1465, 1465, 1465, 1465, 1637, 1465, 1465, 1465, + /* 140 */ 1465, 1465, 1465, 1465, 1465, 1730, 1465, 1954, 1465, 1465, + /* 150 */ 1465, 1736, 1908, 1465, 1465, 1465, 1465, 1590, 1900, 1877, + /* 160 */ 1891, 1878, 1875, 1939, 1939, 1939, 1894, 1465, 1904, 1465, + /* 170 */ 1723, 1700, 1465, 1465, 1700, 1697, 1697, 1465, 1465, 1465, + /* 180 */ 1465, 1465, 1465, 1539, 1465, 1539, 1465, 1465, 1539, 1465, + /* 190 */ 1539, 1465, 1539, 1539, 1465, 1539, 1465, 1465, 1465, 1465, /* 200 */ 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, 1465, - /* 210 */ 1465, 1537, 1732, 1465, 1537, 1465, 1465, 1465, 1537, 1913, - /* 220 */ 1465, 1465, 1465, 1465, 1913, 1465, 1465, 1465, 1465, 1537, + /* 210 */ 1465, 1465, 1537, 1732, 1465, 1537, 1465, 1465, 1465, 1537, + /* 220 */ 1913, 1465, 1465, 1465, 1465, 1913, 1465, 1465, 1465, 1537, /* 230 */ 1465, 1537, 1465, 1465, 1465, 1915, 1913, 1465, 1465, 1915, /* 240 */ 1913, 1465, 1465, 1465, 1927, 1923, 1915, 1931, 1929, 1906, /* 250 */ 1904, 1891, 1465, 1465, 1945, 1941, 1957, 1945, 1941, 1945, @@ -1858,7 
+1858,7 @@ static const char *const yyRuleName[] = { /* 231 */ "table_name_cond ::= table_name", /* 232 */ "from_db_opt ::=", /* 233 */ "from_db_opt ::= FROM db_name", - /* 234 */ "cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options", + /* 234 */ "cmd ::= CREATE SMA INDEX not_exists_opt index_name ON full_table_name index_options", /* 235 */ "cmd ::= DROP INDEX exists_opt index_name", /* 236 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt", /* 237 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt", @@ -2949,7 +2949,7 @@ static const struct { { 307, -1 }, /* (231) table_name_cond ::= table_name */ { 308, 0 }, /* (232) from_db_opt ::= */ { 308, -2 }, /* (233) from_db_opt ::= FROM db_name */ - { 257, -8 }, /* (234) cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */ + { 257, -8 }, /* (234) cmd ::= CREATE SMA INDEX not_exists_opt index_name ON full_table_name index_options */ { 257, -4 }, /* (235) cmd ::= DROP INDEX exists_opt index_name */ { 310, -10 }, /* (236) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */ { 310, -12 }, /* (237) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */ @@ -4064,8 +4064,8 @@ static YYACTIONTYPE yy_reduce( case 233: /* from_db_opt ::= FROM db_name */ { yymsp[-1].minor.yy712 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy129); } break; - case 234: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON table_name index_options */ -{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy337, &yymsp[-3].minor.yy129, &yymsp[-1].minor.yy129, NULL, yymsp[0].minor.yy712); } + case 234: /* cmd ::= CREATE SMA INDEX not_exists_opt index_name ON full_table_name index_options */ +{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy337, &yymsp[-3].minor.yy129, yymsp[-1].minor.yy712, NULL, yymsp[0].minor.yy712); } break; case 235: /* cmd ::= DROP INDEX exists_opt index_name */ { pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy337, &yymsp[0].minor.yy129); } @@ -4805,6 +4805,8 @@ static void yy_syntax_error( } else { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INCOMPLETE_SQL); } + } else if (TSDB_CODE_PAR_DB_NOT_SPECIFIED == pCxt->errCode && TK_NK_FLOAT == TOKEN.type) { + pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, TOKEN.z); } /************ End %syntax_error code ******************************************/ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp index c91e09c825540a1c8fc8299708420fd43f65c69d..ebf8012cf752a0e42f5d86354ecc60e057ebf1bc 100644 --- a/source/libs/parser/test/parSelectTest.cpp +++ b/source/libs/parser/test/parSelectTest.cpp @@ -70,6 +70,8 @@ TEST_F(ParserSelectTest, condition) { run("SELECT c1 FROM t1 WHERE NOT ts in (true, false)"); run("SELECT * FROM t1 WHERE c1 > 10 and c1 is not null"); + + run("SELECT * FROM t1 WHERE TBNAME like 'fda%' or TS > '2021-05-05 18:19:01.000'"); } TEST_F(ParserSelectTest, pseudoColumn) { diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 
577266a697fbfa545beca9e03a22c297b53e299b..c02ef647073c43f85406f8ae864e0e184d55e915 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -436,7 +436,7 @@ static int32_t pushDownCondOptDealScan(SOptimizeContext* pCxt, SScanLogicNode* p SNode* pPrimaryKeyCond = NULL; SNode* pOtherCond = NULL; - int32_t code = nodesPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pScan->pTagIndexCond, &pScan->pTagCond, + int32_t code = filterPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pScan->pTagIndexCond, &pScan->pTagCond, &pOtherCond); if (TSDB_CODE_SUCCESS == code && NULL != pScan->pTagCond) { code = pushDownCondOptRebuildTbanme(&pScan->pTagCond); @@ -2427,5 +2427,8 @@ static int32_t applyOptimizeRule(SPlanContext* pCxt, SLogicSubplan* pLogicSubpla } int32_t optimizeLogicPlan(SPlanContext* pCxt, SLogicSubplan* pLogicSubplan) { + if (SUBPLAN_TYPE_MODIFY == pLogicSubplan->subplanType && NULL == pLogicSubplan->pNode->pChildren) { + return TSDB_CODE_SUCCESS; + } return applyOptimizeRule(pCxt, pLogicSubplan); } diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index bc5e50218b158967458b49fd450dcf24eeba2f2e..74c78b970d6edf973d5ca5370f936c96a0466406 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -248,12 +248,25 @@ static bool stbSplNeedSplitWindow(bool streamQuery, SLogicNode* pNode) { return false; } +static bool stbSplNeedSplitJoin(bool streamQuery, SJoinLogicNode* pJoin) { + if (pJoin->isSingleTableJoin) { + return false; + } + SNode* pChild = NULL; + FOREACH(pChild, pJoin->node.pChildren) { + if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild) && QUERY_NODE_LOGIC_PLAN_JOIN != nodeType(pChild)) { + return false; + } + } + return true; +} + static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { switch (nodeType(pNode)) { case QUERY_NODE_LOGIC_PLAN_SCAN: return stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pNode); case QUERY_NODE_LOGIC_PLAN_JOIN: - return !(((SJoinLogicNode*)pNode)->isSingleTableJoin); + return stbSplNeedSplitJoin(streamQuery, (SJoinLogicNode*)pNode); case QUERY_NODE_LOGIC_PLAN_PARTITION: return stbSplIsMultiTbScanChild(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_AGG: @@ -763,9 +776,13 @@ static SNode* stbSplCreateColumnNode(SExprNode* pExpr) { return NULL; } if (QUERY_NODE_COLUMN == nodeType(pExpr)) { + strcpy(pCol->dbName, ((SColumnNode*)pExpr)->dbName); + strcpy(pCol->tableName, ((SColumnNode*)pExpr)->tableName); strcpy(pCol->tableAlias, ((SColumnNode*)pExpr)->tableAlias); + strcpy(pCol->colName, ((SColumnNode*)pExpr)->colName); + } else { + strcpy(pCol->colName, pExpr->aliasName); } - strcpy(pCol->colName, pExpr->aliasName); strcpy(pCol->node.aliasName, pExpr->aliasName); pCol->node.resType = pExpr->resType; return (SNode*)pCol; diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c index f3174ce70a14f335c945ba2f539deb85c1285d0b..0554779746ac6fc6f0ebf2badeee55d24f9b9ef5 100644 --- a/source/libs/planner/src/planner.c +++ b/source/libs/planner/src/planner.c @@ -42,9 +42,6 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo if (TSDB_CODE_SUCCESS == code) { code = createPhysiPlan(pCxt, pLogicPlan, pPlan, pExecNodeList); } - if (TSDB_CODE_SUCCESS == code) { - dumpQueryPlan(*pPlan); - } nodesDestroyNode((SNode*)pLogicSubplan); nodesDestroyNode((SNode*)pLogicPlan); diff --git a/source/libs/planner/test/planOrderByTest.cpp 
b/source/libs/planner/test/planOrderByTest.cpp index 13dfbad78cfc1f2957bd48ef1e163a9c887881e0..6fef080bc14c9475e1e8e87347a01afc26c4dbb1 100644 --- a/source/libs/planner/test/planOrderByTest.cpp +++ b/source/libs/planner/test/planOrderByTest.cpp @@ -39,6 +39,8 @@ TEST_F(PlanOrderByTest, expr) { useDb("root", "test"); run("SELECT * FROM t1 ORDER BY c1 + 10, c2"); + + run("SELECT c1 FROM st1 ORDER BY ts, _C0"); } TEST_F(PlanOrderByTest, nullsOrder) { diff --git a/source/libs/planner/test/planSubqueryTest.cpp b/source/libs/planner/test/planSubqueryTest.cpp index 16dd91846c6b0cca12135238f73dc382be490dfc..522f1bf746dc7ee1ac8d4230a2b5341f79b7a49f 100644 --- a/source/libs/planner/test/planSubqueryTest.cpp +++ b/source/libs/planner/test/planSubqueryTest.cpp @@ -64,6 +64,12 @@ TEST_F(PlanSubqeuryTest, innerFill) { "WHERE ts > '2022-04-06 00:00:00'"); } +TEST_F(PlanSubqeuryTest, innerOrderBy) { + useDb("root", "test"); + + run("SELECT c2 FROM (SELECT c2 FROM st1 ORDER BY c1, _rowts)"); +} + TEST_F(PlanSubqeuryTest, outerInterval) { useDb("root", "test"); @@ -73,3 +79,9 @@ TEST_F(PlanSubqeuryTest, outerInterval) { run("SELECT COUNT(*) FROM (SELECT ts, TOP(c1, 10) FROM st1s1) INTERVAL(5s)"); } + +TEST_F(PlanSubqeuryTest, outerPartition) { + useDb("root", "test"); + + run("SELECT c1, COUNT(*) FROM (SELECT ts, c1 FROM st1) PARTITION BY c1"); +} diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 29773b95c354fd37a1c556039c5504cb85132197..04328fda9ca22532045f9e8dabbab07e0bcec2af 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -22,6 +22,7 @@ #include "tcompare.h" #include "tdatablock.h" #include "ttime.h" +#include "functionMgt.h" OptrStr gOptrStr[] = { {0, "invalid"}, @@ -97,7 +98,7 @@ rangeCompFunc filterGetRangeCompFunc(char sflag, char eflag) { if (FILTER_GET_FLAG(eflag, RANGE_FLG_EXCLUDE)) { return filterRangeCompLe; } - + return filterRangeCompLi; } @@ -105,7 +106,7 @@ rangeCompFunc filterGetRangeCompFunc(char sflag, char eflag) { if (FILTER_GET_FLAG(sflag, RANGE_FLG_EXCLUDE)) { return filterRangeCompGe; } - + return filterRangeCompGi; } @@ -130,7 +131,7 @@ rangeCompFunc gRangeCompare[] = {filterRangeCompee, filterRangeCompei, filterRan int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) { if (optr2) { - assert(optr2 == OP_TYPE_LOWER_THAN || optr2 == OP_TYPE_LOWER_EQUAL); + assert(optr2 == OP_TYPE_LOWER_THAN || optr2 == OP_TYPE_LOWER_EQUAL); if (optr == OP_TYPE_GREATER_THAN) { if (optr2 == OP_TYPE_LOWER_THAN) { @@ -164,9 +165,9 @@ int8_t filterGetRangeCompFuncFromOptrs(uint8_t optr, uint8_t optr2) { } __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val, compareInt64Val, compareFloatVal, - compareDoubleVal, compareLenPrefixedStr, compareStrPatternMatch, compareChkInString, compareWStrPatternMatch, + compareDoubleVal, compareLenPrefixedStr, compareStrPatternMatch, compareChkInString, compareWStrPatternMatch, compareLenPrefixedWStr, compareUint8Val, compareUint16Val, compareUint32Val, compareUint64Val, - setChkInBytes1, setChkInBytes2, setChkInBytes4, setChkInBytes8, compareStrRegexCompMatch, + setChkInBytes1, setChkInBytes2, setChkInBytes4, setChkInBytes8, compareStrRegexCompMatch, compareStrRegexCompNMatch, setChkNotInBytes1, setChkNotInBytes2, setChkNotInBytes4, setChkNotInBytes8, compareChkNotInString, compareStrPatternNotMatch, compareWStrPatternNotMatch }; @@ -177,20 +178,20 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { if (optr == OP_TYPE_IN && (type != 
TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR)) { switch (type) { case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: return 15; case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_USMALLINT: return 16; case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_UINT: - case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_FLOAT: return 17; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - case TSDB_DATA_TYPE_DOUBLE: - case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: return 18; case TSDB_DATA_TYPE_JSON: terrno = TSDB_CODE_QRY_JSON_IN_ERROR; @@ -203,20 +204,20 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { if (optr == OP_TYPE_NOT_IN && (type != TSDB_DATA_TYPE_BINARY && type != TSDB_DATA_TYPE_NCHAR)) { switch (type) { case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: - case TSDB_DATA_TYPE_UTINYINT: + case TSDB_DATA_TYPE_TINYINT: + case TSDB_DATA_TYPE_UTINYINT: return 21; case TSDB_DATA_TYPE_SMALLINT: case TSDB_DATA_TYPE_USMALLINT: return 22; case TSDB_DATA_TYPE_INT: case TSDB_DATA_TYPE_UINT: - case TSDB_DATA_TYPE_FLOAT: + case TSDB_DATA_TYPE_FLOAT: return 23; - case TSDB_DATA_TYPE_BIGINT: - case TSDB_DATA_TYPE_UBIGINT: - case TSDB_DATA_TYPE_DOUBLE: - case TSDB_DATA_TYPE_TIMESTAMP: + case TSDB_DATA_TYPE_BIGINT: + case TSDB_DATA_TYPE_UBIGINT: + case TSDB_DATA_TYPE_DOUBLE: + case TSDB_DATA_TYPE_TIMESTAMP: return 24; case TSDB_DATA_TYPE_JSON: terrno = TSDB_CODE_QRY_JSON_IN_ERROR; @@ -256,10 +257,10 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { } else { /* normal relational comparFn */ comparFn = 6; } - + break; } - + case TSDB_DATA_TYPE_NCHAR: { if (optr == OP_TYPE_MATCH) { comparFn = 19; @@ -288,7 +289,7 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) { comparFn = 0; break; } - + return comparFn; } @@ -307,7 +308,7 @@ static FORCE_INLINE int32_t filterCompareGroupCtx(const void *pLeft, const void int32_t filterInitUnitsFields(SFilterInfo *info) { info->unitSize = FILTER_DEFAULT_UNIT_SIZE; info->units = taosMemoryCalloc(info->unitSize, sizeof(SFilterUnit)); - + info->fields[FLD_TYPE_COLUMN].num = 0; info->fields[FLD_TYPE_COLUMN].size = FILTER_DEFAULT_FIELD_SIZE; info->fields[FLD_TYPE_COLUMN].fields = taosMemoryCalloc(info->fields[FLD_TYPE_COLUMN].size, sizeof(SFilterField)); @@ -320,7 +321,7 @@ int32_t filterInitUnitsFields(SFilterInfo *info) { static FORCE_INLINE SFilterRangeNode* filterNewRange(SFilterRangeCtx *ctx, SFilterRange* ra) { SFilterRangeNode *r = NULL; - + if (ctx->rf) { r = ctx->rf; ctx->rf = ctx->rf->next; @@ -340,7 +341,7 @@ void* filterInitRangeCtx(int32_t type, int32_t options) { qError("not supported range type:%d", type); return NULL; } - + SFilterRangeCtx *ctx = taosMemoryCalloc(1, sizeof(SFilterRangeCtx)); ctx->type = type; @@ -365,7 +366,7 @@ int32_t filterResetRangeCtx(SFilterRangeCtx *ctx) { ctx->isrange = false; SFilterRangeNode *r = ctx->rf; - + while (r && r->next) { r = r->next; } @@ -401,7 +402,7 @@ int32_t filterConvertRange(SFilterRangeCtx *cur, SFilterRange *ra, bool *notNull } } - + if (FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL) && FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL)) { *notNull = true; } else { @@ -437,7 +438,7 @@ int32_t filterAddRangeImpl(void* h, SFilterRange* ra, int32_t optr) { SFilterRangeCtx *ctx = (SFilterRangeCtx *)h; if (ctx->rs == NULL) { - if ((FILTER_GET_FLAG(ctx->status, MR_ST_START) == 0) + 
if ((FILTER_GET_FLAG(ctx->status, MR_ST_START) == 0) || (FILTER_GET_FLAG(ctx->status, MR_ST_ALL) && (optr == LOGIC_COND_TYPE_AND)) || ((!FILTER_GET_FLAG(ctx->status, MR_ST_ALL)) && (optr == LOGIC_COND_TYPE_OR))) { APPEND_RANGE(ctx, ctx->rs, ra); @@ -488,23 +489,23 @@ int32_t filterAddRangeImpl(void* h, SFilterRange* ra, int32_t optr) { //TSDB_RELATION_OR - + bool smerged = false; bool emerged = false; while (r != NULL) { cr = ctx->pCompareFunc(&r->ra.s, &ra->e); - if (FILTER_GREATER(cr, r->ra.sflag, ra->eflag)) { + if (FILTER_GREATER(cr, r->ra.sflag, ra->eflag)) { if (emerged == false) { INSERT_RANGE(ctx, r, ra); } - + break; } if (smerged == false) { cr = ctx->pCompareFunc(&ra->s, &r->ra.e); - if (FILTER_GREATER(cr, ra->sflag, r->ra.eflag)) { + if (FILTER_GREATER(cr, ra->sflag, r->ra.eflag)) { if (r->next) { r= r->next; continue; @@ -515,23 +516,23 @@ int32_t filterAddRangeImpl(void* h, SFilterRange* ra, int32_t optr) { } cr = ctx->pCompareFunc(&r->ra.s, &ra->s); - if (FILTER_GREATER(cr, r->ra.sflag, ra->sflag)) { - SIMPLE_COPY_VALUES((char *)&r->ra.s, &ra->s); + if (FILTER_GREATER(cr, r->ra.sflag, ra->sflag)) { + SIMPLE_COPY_VALUES((char *)&r->ra.s, &ra->s); cr == 0 ? (r->ra.sflag &= ra->sflag) : (r->ra.sflag = ra->sflag); } smerged = true; } - + if (emerged == false) { cr = ctx->pCompareFunc(&ra->e, &r->ra.e); if (FILTER_GREATER(cr, ra->eflag, r->ra.eflag)) { SIMPLE_COPY_VALUES((char *)&r->ra.e, &ra->e); - if (cr == 0) { + if (cr == 0) { r->ra.eflag &= ra->eflag; break; } - + r->ra.eflag = ra->eflag; emerged = true; r = r->next; @@ -552,7 +553,7 @@ int32_t filterAddRangeImpl(void* h, SFilterRange* ra, int32_t optr) { SIMPLE_COPY_VALUES(&r->prev->ra.e, (char *)&r->ra.e); cr == 0 ? (r->prev->ra.eflag &= r->ra.eflag) : (r->prev->ra.eflag = r->ra.eflag); FREE_RANGE(ctx, r); - + break; } } @@ -570,12 +571,12 @@ int32_t filterAddRangeImpl(void* h, SFilterRange* ra, int32_t optr) { } } - return TSDB_CODE_SUCCESS; + return TSDB_CODE_SUCCESS; } int32_t filterAddRange(void* h, SFilterRange* ra, int32_t optr) { SFilterRangeCtx *ctx = (SFilterRangeCtx *)h; - + if (FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) { SIMPLE_COPY_VALUES(&ra->s, getDataMin(ctx->type)); //FILTER_CLR_FLAG(ra->sflag, RA_NULL); @@ -601,7 +602,7 @@ int32_t filterAddRangeCtx(void *dst, void *src, int32_t optr) { } SFilterRangeNode *r = sctx->rs; - + while (r) { filterAddRange(dctx, &r->ra, optr); r = r->next; @@ -615,14 +616,14 @@ int32_t filterCopyRangeCtx(void *dst, void *src) { SFilterRangeCtx *sctx = (SFilterRangeCtx *)src; dctx->status = sctx->status; - + dctx->isnull = sctx->isnull; dctx->notnull = sctx->notnull; dctx->isrange = sctx->isrange; SFilterRangeNode *r = sctx->rs; SFilterRangeNode *dr = dctx->rs; - + while (r) { APPEND_RANGE(dctx, dr, &r->ra); if (dr == NULL) { @@ -648,7 +649,7 @@ int32_t filterFinishRange(void* h) { if (FILTER_GET_FLAG(ctx->options, FLT_OPTION_TIMESTAMP)) { SFilterRangeNode *r = ctx->rs; SFilterRangeNode *rn = NULL; - + while (r && r->next) { int64_t tmp = 1; operateVal(&tmp, &r->ra.e, &tmp, OP_TYPE_ADD, ctx->type); @@ -657,10 +658,10 @@ int32_t filterFinishRange(void* h) { SIMPLE_COPY_VALUES((char *)&r->next->ra.s, (char *)&r->ra.s); FREE_RANGE(ctx, r); r = rn; - + continue; } - + r = r->next; } } @@ -672,13 +673,13 @@ int32_t filterFinishRange(void* h) { int32_t filterGetRangeNum(void* h, int32_t* num) { filterFinishRange(h); - + SFilterRangeCtx *ctx = (SFilterRangeCtx *)h; *num = 0; SFilterRangeNode *r = ctx->rs; - + while (r) { ++(*num); r = r->next; @@ -694,7 +695,7 @@ int32_t 
filterGetRangeRes(void* h, SFilterRange *ra) { SFilterRangeCtx *ctx = (SFilterRangeCtx *)h; uint32_t num = 0; SFilterRangeNode* r = ctx->rs; - + while (r) { if (num) { ra->e = r->ra.e; @@ -711,7 +712,7 @@ int32_t filterGetRangeRes(void* h, SFilterRange *ra) { qError("no range result"); return TSDB_CODE_QRY_APP_ERROR; } - + return TSDB_CODE_SUCCESS; } @@ -739,7 +740,7 @@ int32_t filterSourceRangeFromCtx(SFilterRangeCtx *ctx, void *sctx, int32_t optr, if (!(optr == LOGIC_COND_TYPE_OR && ctx->notnull)) { filterAddRangeCtx(ctx, src, optr); } - + if (FILTER_GET_FLAG(ctx->status, MR_ST_ALL)) { *all = true; } @@ -754,11 +755,11 @@ int32_t filterFreeRangeCtx(void* h) { if (h == NULL) { return TSDB_CODE_SUCCESS; } - + SFilterRangeCtx *ctx = (SFilterRangeCtx *)h; SFilterRangeNode *r = ctx->rs; SFilterRangeNode *rn = NULL; - + while (r) { rn = r->next; taosMemoryFree(r); @@ -784,10 +785,10 @@ int32_t filterDetachCnfGroup(SFilterGroup *gp1, SFilterGroup *gp2, SArray* group gp.unitNum = gp1->unitNum + gp2->unitNum; gp.unitIdxs = taosMemoryCalloc(gp.unitNum, sizeof(*gp.unitIdxs)); memcpy(gp.unitIdxs, gp1->unitIdxs, gp1->unitNum * sizeof(*gp.unitIdxs)); - memcpy(gp.unitIdxs + gp1->unitNum, gp2->unitIdxs, gp2->unitNum * sizeof(*gp.unitIdxs)); + memcpy(gp.unitIdxs + gp1->unitNum, gp2->unitIdxs, gp2->unitNum * sizeof(*gp.unitIdxs)); gp.unitFlags = NULL; - + taosArrayPush(group, &gp); return TSDB_CODE_SUCCESS; @@ -801,7 +802,7 @@ int32_t filterDetachCnfGroups(SArray* group, SArray* left, SArray* right) { if (taosArrayGetSize(left) <= 0) { if (taosArrayGetSize(right) <= 0) { fltError("both groups are empty"); - FLT_ERR_RET(TSDB_CODE_QRY_APP_ERROR); + FLT_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } SFilterGroup *gp = NULL; @@ -812,7 +813,7 @@ int32_t filterDetachCnfGroups(SArray* group, SArray* left, SArray* right) { return TSDB_CODE_SUCCESS; } - if (taosArrayGetSize(right) <= 0) { + if (taosArrayGetSize(right) <= 0) { SFilterGroup *gp = NULL; while ((gp = (SFilterGroup *)taosArrayPop(left)) != NULL) { taosArrayPush(group, gp); @@ -820,10 +821,10 @@ int32_t filterDetachCnfGroups(SArray* group, SArray* left, SArray* right) { return TSDB_CODE_SUCCESS; } - + for (int32_t l = 0; l < leftSize; ++l) { SFilterGroup *gp1 = taosArrayGet(left, l); - + for (int32_t r = 0; r < rightSize; ++r) { SFilterGroup *gp2 = taosArrayGet(right, r); @@ -877,15 +878,15 @@ int32_t filterAddField(SFilterInfo *info, void *desc, void **data, int32_t type, idx = filterGetFiledByData(info, type, *data, dataLen); } } - + if (idx < 0) { idx = *num; if (idx >= info->fields[type].size) { info->fields[type].size += FILTER_DEFAULT_FIELD_SIZE; info->fields[type].fields = taosMemoryRealloc(info->fields[type].fields, info->fields[type].size * sizeof(SFilterField)); } - - info->fields[type].fields[idx].flag = type; + + info->fields[type].fields[idx].flag = type; info->fields[type].fields[idx].desc = desc; info->fields[type].fields[idx].data = data ? 
*data : NULL; @@ -899,7 +900,7 @@ int32_t filterAddField(SFilterInfo *info, void *desc, void **data, int32_t type, if (info->pctx.valHash == NULL) { info->pctx.valHash = taosHashInit(FILTER_DEFAULT_GROUP_SIZE * FILTER_DEFAULT_VALUE_SIZE, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, false); } - + taosHashPut(info->pctx.valHash, *data, dataLen, &idx, sizeof(idx)); } } else { @@ -910,7 +911,7 @@ int32_t filterAddField(SFilterInfo *info, void *desc, void **data, int32_t type, fid->type = type; fid->idx = idx; - + return TSDB_CODE_SUCCESS; } @@ -928,11 +929,11 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f fltError("empty node"); FLT_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } - + if (nodeType(node) != QUERY_NODE_COLUMN && nodeType(node) != QUERY_NODE_VALUE && nodeType(node) != QUERY_NODE_NODE_LIST) { FLT_ERR_RET(TSDB_CODE_QRY_APP_ERROR); } - + int32_t type; void *v; @@ -945,7 +946,7 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f } filterAddField(info, v, NULL, type, fid, 0, true); - + return TSDB_CODE_SUCCESS; } @@ -972,7 +973,7 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi } SFilterUnit *u = &info->units[info->unitNum]; - + u->compare.optr = optr; u->left = *left; if (right) { @@ -980,7 +981,7 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi } if (u->right.type == FLD_TYPE_VALUE) { - SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u); + SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u); assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE)); } else { int32_t paramNum = scalarGetOperatorParamNum(optr); @@ -989,10 +990,10 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi return TSDB_CODE_QRY_APP_ERROR; } } - + SFilterField *col = FILTER_UNIT_LEFT_FIELD(info, u); assert(FILTER_GET_FLAG(col->flag, FLD_TYPE_COLUMN)); - + info->units[info->unitNum].compare.type = FILTER_GET_COL_FIELD_TYPE(col); info->units[info->unitNum].compare.precision = FILTER_GET_COL_FIELD_PRECISION(col); @@ -1000,12 +1001,12 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) { int64_t v = 0; - FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1); + FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? 
right->idx : -1); taosHashPut(info->pctx.unitHash, &v, sizeof(v), uidx, sizeof(*uidx)); } - + ++info->unitNum; - + return TSDB_CODE_SUCCESS; } @@ -1016,7 +1017,7 @@ int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) { group->unitSize += FILTER_DEFAULT_UNIT_SIZE; group->unitIdxs = taosMemoryRealloc(group->unitIdxs, group->unitSize * sizeof(*group->unitIdxs)); } - + group->unitIdxs[group->unitNum++] = unitIdx; return TSDB_CODE_SUCCESS; @@ -1039,10 +1040,10 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode* tree, SArray *group) { SScalarParam out = {.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData))}; out.columnData->info.type = type; out.columnData->info.bytes = tDataTypes[type].bytes; - + for (int32_t i = 0; i < listNode->pNodeList->length; ++i) { SValueNode *valueNode = (SValueNode *)cell->pNode; - if (valueNode->node.resType.type != type) { + if (valueNode->node.resType.type != type) { int32_t overflow = 0; code = doConvertDataType(valueNode, &out, &overflow); if (code) { @@ -1054,7 +1055,7 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode* tree, SArray *group) { cell = cell->pNext; continue; } - + len = tDataTypes[type].bytes; filterAddField(info, NULL, (void**) &out.columnData->pData, FLD_TYPE_VALUE, &right, len, true); @@ -1065,13 +1066,13 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode* tree, SArray *group) { FLT_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY); } memcpy(data, nodesGetValueFromNode(valueNode), tDataTypes[type].bytes); - filterAddField(info, NULL, (void**) &data, FLD_TYPE_VALUE, &right, len, true); + filterAddField(info, NULL, (void**) &data, FLD_TYPE_VALUE, &right, len, true); } filterAddUnit(info, OP_TYPE_EQUAL, &left, &right, &uidx); - + SFilterGroup fgroup = {0}; filterAddUnitToGroup(&fgroup, uidx); - + taosArrayPush(group, &fgroup); cell = cell->pNext; @@ -1080,14 +1081,14 @@ int32_t fltAddGroupUnitFromNode(SFilterInfo *info, SNode* tree, SArray *group) { taosMemoryFree(out.columnData); } else { filterAddFieldFromNode(info, node->pRight, &right); - + FLT_ERR_RET(filterAddUnit(info, node->opType, &left, &right, &uidx)); SFilterGroup fgroup = {0}; filterAddUnitToGroup(&fgroup, uidx); - + taosArrayPush(group, &fgroup); } - + return TSDB_CODE_SUCCESS; } @@ -1099,7 +1100,7 @@ int32_t filterAddUnitFromUnit(SFilterInfo *dst, SFilterInfo *src, SFilterUnit* u filterAddField(dst, FILTER_UNIT_COL_DESC(src, u), NULL, FLD_TYPE_COLUMN, &left, 0, false); SFilterField *t = FILTER_UNIT_LEFT_FIELD(src, u); - + if (u->right.type == FLD_TYPE_VALUE) { void *data = FILTER_UNIT_VAL_DATA(src, u); if (IS_VAR_DATA_TYPE(type)) { @@ -1115,7 +1116,7 @@ int32_t filterAddUnitFromUnit(SFilterInfo *dst, SFilterInfo *src, SFilterUnit* u filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, tDataTypes[type].bytes, false); } - flag = FLD_DATA_NO_FREE; + flag = FLD_DATA_NO_FREE; t = FILTER_UNIT_RIGHT_FIELD(src, u); FILTER_SET_FLAG(t->flag, flag); } else { @@ -1151,7 +1152,7 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan return TSDB_CODE_SUCCESS; } - if (ctx->notnull) { + if (ctx->notnull) { assert(ctx->isnull == false && ctx->isrange == false); filterAddUnit(dst, OP_TYPE_IS_NOT_NULL, &left, NULL, &uidx); filterAddUnitToGroup(g, uidx); @@ -1166,7 +1167,7 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan assert(ctx->rs && ctx->rs->next == NULL); SFilterRange *ra = &ctx->rs->ra; - + assert(!((FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) && (FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL)))); 
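/*
 * Sketch of what the AND branch below emits (illustration only, not part of
 * the patch): a two-sided range such as (10, 20] -- RANGE_FLG_EXCLUDE set on
 * the start flag, clear on the end flag -- becomes one chained unit pair,
 *
 *   filterAddUnit(dst, OP_TYPE_GREATER_THAN, &left, &right, &uidx);   // col > 10
 *   filterAddUnitRight(dst, OP_TYPE_LOWER_EQUAL, &right2, uidx);      // col <= 20
 *   filterAddUnitToGroup(g, uidx);
 *
 * while a single-point range degenerates into one OP_TYPE_EQUAL unit, and the
 * one-sided cases further down add exactly one comparison unit each.
 */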
if ((!FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) && (!FILTER_GET_FLAG(ra->eflag, RANGE_FLG_NULL))) { @@ -1177,7 +1178,7 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, tDataTypes[type].bytes, true); filterAddUnit(dst, OP_TYPE_EQUAL, &left, &right, &uidx); filterAddUnitToGroup(g, uidx); - return TSDB_CODE_SUCCESS; + return TSDB_CODE_SUCCESS; } else { void *data = taosMemoryMalloc(sizeof(int64_t)); SIMPLE_COPY_VALUES(data, &ra->s); @@ -1185,14 +1186,14 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan void *data2 = taosMemoryMalloc(sizeof(int64_t)); SIMPLE_COPY_VALUES(data2, &ra->e); filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true); - + filterAddUnit(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx); filterAddUnitRight(dst, FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx); filterAddUnitToGroup(g, uidx); - return TSDB_CODE_SUCCESS; + return TSDB_CODE_SUCCESS; } } - + if (!FILTER_GET_FLAG(ra->sflag, RANGE_FLG_NULL)) { void *data = taosMemoryMalloc(sizeof(int64_t)); SIMPLE_COPY_VALUES(data, &ra->s); @@ -1207,28 +1208,28 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, tDataTypes[type].bytes, true); filterAddUnit(dst, FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &left, &right, &uidx); filterAddUnitToGroup(g, uidx); - } + } - return TSDB_CODE_SUCCESS; - } + return TSDB_CODE_SUCCESS; + } // OR PROCESS - + SFilterGroup ng = {0}; g = &ng; assert(ctx->isnull || ctx->notnull || ctx->isrange); - + if (ctx->isnull) { filterAddUnit(dst, OP_TYPE_IS_NULL, &left, NULL, &uidx); - filterAddUnitToGroup(g, uidx); + filterAddUnitToGroup(g, uidx); taosArrayPush(res, g); } - + if (ctx->notnull) { assert(!ctx->isrange); memset(g, 0, sizeof(*g)); - + filterAddUnit(dst, OP_TYPE_IS_NOT_NULL, &left, NULL, &uidx); filterAddUnitToGroup(g, uidx); taosArrayPush(res, g); @@ -1241,7 +1242,7 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan } SFilterRangeNode *r = ctx->rs; - + while (r) { memset(g, 0, sizeof(*g)); @@ -1260,19 +1261,19 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan void *data2 = taosMemoryMalloc(sizeof(int64_t)); SIMPLE_COPY_VALUES(data2, &r->ra.e); filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true); - + filterAddUnit(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx); filterAddUnitRight(dst, FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx); filterAddUnitToGroup(g, uidx); } taosArrayPush(res, g); - + r = r->next; - + continue; } - + if (!FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_NULL)) { void *data = taosMemoryMalloc(sizeof(int64_t)); SIMPLE_COPY_VALUES(data, &r->ra.s); @@ -1280,10 +1281,10 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan filterAddUnit(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? 
OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx); filterAddUnitToGroup(g, uidx); } - + if (!FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_NULL)) { void *data = taosMemoryMalloc(sizeof(int64_t)); - SIMPLE_COPY_VALUES(data, &r->ra.e); + SIMPLE_COPY_VALUES(data, &r->ra.e); filterAddField(dst, NULL, &data, FLD_TYPE_VALUE, &right, tDataTypes[type].bytes, true); filterAddUnit(dst, FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &left, &right, &uidx); filterAddUnitToGroup(g, uidx); @@ -1306,7 +1307,7 @@ static void filterFreeGroup(void *pItem) { if (pItem == NULL) { return; } - + SFilterGroup* p = (SFilterGroup*) pItem; taosMemoryFreeClear(p->unitIdxs); taosMemoryFreeClear(p->unitFlags); @@ -1328,17 +1329,17 @@ EDealRes fltTreeToGroup(SNode* pNode, void* pContext) { for (int32_t i = 0; i < node->pParameterList->length; ++i) { newGroup = taosArrayInit(4, sizeof(SFilterGroup)); resGroup = taosArrayInit(4, sizeof(SFilterGroup)); - + SFltBuildGroupCtx tctx = {.info = ctx->info, .group = newGroup}; nodesWalkExpr(cell->pNode, fltTreeToGroup, (void *)&tctx); FLT_ERR_JRET(tctx.code); - + FLT_ERR_JRET(filterDetachCnfGroups(resGroup, preGroup, newGroup)); - + taosArrayDestroyEx(newGroup, filterFreeGroup); newGroup = NULL; taosArrayDestroyEx(preGroup, filterFreeGroup); - + preGroup = resGroup; resGroup = NULL; @@ -1348,7 +1349,7 @@ EDealRes fltTreeToGroup(SNode* pNode, void* pContext) { taosArrayAddAll(ctx->group, preGroup); taosArrayDestroy(preGroup); - + return DEAL_RES_IGNORE_CHILD; } @@ -1357,23 +1358,23 @@ EDealRes fltTreeToGroup(SNode* pNode, void* pContext) { for (int32_t i = 0; i < node->pParameterList->length; ++i) { nodesWalkExpr(cell->pNode, fltTreeToGroup, (void *)pContext); FLT_ERR_JRET(ctx->code); - + cell = cell->pNext; } - + return DEAL_RES_IGNORE_CHILD; } ctx->code = TSDB_CODE_QRY_APP_ERROR; - + fltError("invalid condition type, type:%d", node->condType); return DEAL_RES_ERROR; } if (QUERY_NODE_OPERATOR == nType) { - FLT_ERR_JRET(fltAddGroupUnitFromNode(ctx->info, pNode, ctx->group)); - + FLT_ERR_JRET(fltAddGroupUnitFromNode(ctx->info, pNode, ctx->group)); + return DEAL_RES_IGNORE_CHILD; } @@ -1496,7 +1497,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) SValueNode *var = (SValueNode *)field->desc; SDataType *dType = &var->node.resType; if (dType->type == TSDB_DATA_TYPE_VALUE_ARRAY) { - qDebug("VAL%d => [type:TS][val:[%" PRIi64"] - [%" PRId64 "]]", i, *(int64_t *)field->data, *(((int64_t *)field->data) + 1)); + qDebug("VAL%d => [type:TS][val:[%" PRIi64"] - [%" PRId64 "]]", i, *(int64_t *)field->data, *(((int64_t *)field->data) + 1)); } else { qDebug("VAL%d => [type:%d][val:%" PRIx64"]", i, dType->type, var->datum.i); //TODO } @@ -1512,7 +1513,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) int32_t len = 0; int32_t tlen = 0; char str[512] = {0}; - + SFilterField *left = FILTER_UNIT_LEFT_FIELD(info, unit); SColumnNode *refNode = (SColumnNode *)left->desc; if (unit->compare.optr >= 0 && unit->compare.optr <= OP_TYPE_JSON_CONTAINS){ @@ -1537,7 +1538,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) if (unit->compare.optr2 >= 0 && unit->compare.optr2 <= OP_TYPE_JSON_CONTAINS){ sprintf(str + strlen(str), "[%d][%d] %s [", refNode->dataBlockId, refNode->slotId, gOptrStr[unit->compare.optr2].str); } - + if (unit->right2.type == FLD_TYPE_VALUE && FILTER_UNIT_OPTR(unit) != OP_TYPE_IN) { SFilterField *right = 
FILTER_UNIT_RIGHT2_FIELD(info, unit); char *data = right->data; @@ -1551,7 +1552,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) } strcat(str, "]"); } - + qDebug("%s", str); //TODO } @@ -1575,10 +1576,10 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) for (uint32_t i = 0; i < info->colRangeNum; ++i) { SFilterRangeCtx *ctx = info->colRange[i]; qDebug("Column ID[%d] RANGE: isnull[%d],notnull[%d],range[%d]", ctx->colId, ctx->isnull, ctx->notnull, ctx->isrange); - if (ctx->isrange) { + if (ctx->isrange) { SFilterRangeNode *r = ctx->rs; while (r) { - char str[256] = {0}; + char str[256] = {0}; int32_t tlen = 0; if (FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_NULL)) { strcat(str,"(NULL)"); @@ -1595,8 +1596,8 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options) fltConverToStr(str + strlen(str), ctx->type, &r->ra.e, tlen > 32 ? 32 : tlen, &tlen); FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? strcat(str,")") : strcat(str,"]"); } - qDebug("range: %s", str); - + qDebug("range: %s", str); + r = r->next; } } @@ -1639,7 +1640,7 @@ void filterFreeColInfo(void *data) { if (info->type == RANGE_TYPE_VAR_HASH) { //TODO } else if (info->type == RANGE_TYPE_MR_CTX) { - filterFreeRangeCtx(info->info); + filterFreeRangeCtx(info->info); } else if (info->type == RANGE_TYPE_UNIT) { taosArrayDestroy((SArray *)info->info); } @@ -1713,14 +1714,14 @@ void filterFreeInfo(SFilterInfo *info) { for (uint32_t f = 0; f < info->fields[i].num; ++f) { filterFreeField(&info->fields[i].fields[f], i); } - + taosMemoryFreeClear(info->fields[i].fields); } for (uint32_t i = 0; i < info->groupNum; ++i) { - filterFreeGroup(&info->groups[i]); + filterFreeGroup(&info->groups[i]); } - + taosMemoryFreeClear(info->groups); taosMemoryFreeClear(info->units); @@ -1744,7 +1745,7 @@ void filterFreeInfo(SFilterInfo *info) { int32_t filterHandleValueExtInfo(SFilterUnit* unit, char extInfo) { assert(extInfo > 0 || extInfo < 0); - + uint8_t optr = FILTER_UNIT_OPTR(unit); switch (optr) { case OP_TYPE_GREATER_THAN: @@ -1773,7 +1774,7 @@ int32_t fltInitValFieldData(SFilterInfo *info) { assert(unit->compare.optr == FILTER_DUMMY_EMPTY_OPTR || scalarGetOperatorParamNum(unit->compare.optr) == 1); continue; } - + SFilterField* right = FILTER_UNIT_RIGHT_FIELD(info, unit); assert(FILTER_GET_FLAG(right->flag, FLD_TYPE_VALUE)); @@ -1781,7 +1782,7 @@ int32_t fltInitValFieldData(SFilterInfo *info) { uint32_t type = FILTER_UNIT_DATA_TYPE(unit); int8_t precision = FILTER_UNIT_DATA_PRECISION(unit); SFilterField* fi = right; - + SValueNode* var = (SValueNode *)fi->desc; if (var == NULL) { assert(fi->data != NULL); @@ -1796,7 +1797,7 @@ int32_t fltInitValFieldData(SFilterInfo *info) { } FILTER_SET_FLAG(fi->flag, FLD_DATA_IS_HASH); - + continue; } @@ -1949,11 +1950,11 @@ int32_t filterAddUnitRange(SFilterInfo *info, SFilterUnit* u, SFilterRangeCtx *c assert(type == TSDB_DATA_TYPE_BOOL); if (GET_INT8_VAL(val)) { SIMPLE_COPY_VALUES(&ra.s, &tmp); - SIMPLE_COPY_VALUES(&ra.e, &tmp); + SIMPLE_COPY_VALUES(&ra.e, &tmp); } else { *(bool *)&tmp = true; SIMPLE_COPY_VALUES(&ra.s, &tmp); - SIMPLE_COPY_VALUES(&ra.e, &tmp); + SIMPLE_COPY_VALUES(&ra.e, &tmp); } break; case OP_TYPE_EQUAL: @@ -1963,7 +1964,7 @@ int32_t filterAddUnitRange(SFilterInfo *info, SFilterUnit* u, SFilterRangeCtx *c default: assert(0); } - + filterAddRange(ctx, &ra, optr); return TSDB_CODE_SUCCESS; @@ -1977,13 +1978,13 @@ int32_t filterCompareRangeCtx(SFilterRangeCtx *ctx1, SFilterRangeCtx *ctx2, bool 
SFilterRangeNode *r1 = ctx1->rs; SFilterRangeNode *r2 = ctx2->rs; - + while (r1 && r2) { FLT_CHK_JMP(r1->ra.sflag != r2->ra.sflag); FLT_CHK_JMP(r1->ra.eflag != r2->ra.eflag); FLT_CHK_JMP(r1->ra.s != r2->ra.s); FLT_CHK_JMP(r1->ra.e != r2->ra.e); - + r1 = r1->next; r2 = r2->next; } @@ -2005,7 +2006,7 @@ int32_t filterMergeUnits(SFilterInfo *info, SFilterGroupCtx* gRes, uint32_t colI int32_t size = (int32_t)taosArrayGetSize(colArray); int32_t type = gRes->colInfo[colIdx].dataType; SFilterRangeCtx* ctx = filterInitRangeCtx(type, 0); - + for (uint32_t i = 0; i < size; ++i) { SFilterUnit* u = taosArrayGetP(colArray, i); uint8_t optr = FILTER_UNIT_OPTR(u); @@ -2044,7 +2045,7 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t uint32_t *colIdx = taosMemoryMalloc(info->fields[FLD_TYPE_COLUMN].num * sizeof(uint32_t)); uint32_t colIdxi = 0; uint32_t gResIdx = 0; - + for (uint32_t i = 0; i < info->groupNum; ++i) { SFilterGroup* g = info->groups + i; @@ -2052,7 +2053,7 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t gRes[gResIdx]->colInfo = taosMemoryCalloc(info->fields[FLD_TYPE_COLUMN].num, sizeof(SFilterColInfo)); colIdxi = 0; empty = false; - + for (uint32_t j = 0; j < g->unitNum; ++j) { SFilterUnit* u = FILTER_GROUP_UNIT(info, g, j); uint32_t cidx = FILTER_UNIT_COL_IDX(u); @@ -2066,7 +2067,7 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t FILTER_SET_FLAG(info->status, FI_STATUS_REWRITE); } } - + FILTER_PUSH_UNIT(gRes[gResIdx]->colInfo[cidx], u); } @@ -2092,10 +2093,10 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t FILTER_SET_FLAG(info->status, FI_STATUS_REWRITE); filterFreeGroupCtx(gRes[gResIdx]); gRes[gResIdx] = NULL; - + continue; } - + gRes[gResIdx]->colNum = colIdxi; FILTER_COPY_IDX(&gRes[gResIdx]->colIdx, colIdx, colIdxi); ++gResIdx; @@ -2104,7 +2105,7 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t taosMemoryFreeClear(colIdx); *gResNum = gResIdx; - + if (gResIdx == 0) { FILTER_SET_FLAG(info->status, FI_STATUS_EMPTY); } @@ -2115,12 +2116,12 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool *conflict) { uint32_t idx1 = 0, idx2 = 0, m = 0, n = 0; bool equal = false; - + for (; m < gRes1->colNum; ++m) { idx1 = gRes1->colIdx[m]; equal = false; - + for (; n < gRes2->colNum; ++n) { idx2 = gRes2->colIdx[n]; if (idx1 < idx2) { @@ -2145,7 +2146,7 @@ void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool return; } } - + ++n; equal = true; break; @@ -2184,7 +2185,7 @@ int32_t filterMergeTwoGroupsImpl(SFilterInfo *info, SFilterRangeCtx **ctx, int32 int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx** gRes1, SFilterGroupCtx** gRes2, bool *all) { bool conflict = false; - + filterCheckColConflict(*gRes1, *gRes2, &conflict); if (conflict) { return TSDB_CODE_SUCCESS; @@ -2202,7 +2203,7 @@ int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx** gRes1, SFilter for (; m < (*gRes1)->colNum; ++m) { idx1 = (*gRes1)->colIdx[m]; - + for (; n < (*gRes2)->colNum; ++n) { idx2 = (*gRes2)->colIdx[n]; @@ -2211,9 +2212,9 @@ int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx** gRes1, SFilter } assert(idx1 == idx2); - + ++merNum; - + filterMergeTwoGroupsImpl(info, &ctx, LOGIC_COND_TYPE_OR, idx1, *gRes1, *gRes2, NULL, all); FLT_CHK_JMP(*all); @@ -2230,7 +2231,7 @@ int32_t 
filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx** gRes1, SFilter if (equal) { ++equal1; } - + filterCompareRangeCtx(ctx, (*gRes2)->colInfo[idx2].info, &equal); if (equal) { ++equal2; @@ -2250,7 +2251,7 @@ int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx** gRes1, SFilter FLT_CHK_JMP(equal1 != merNum); colCtx.colIdx = idx1; - colCtx.ctx = ctx; + colCtx.ctx = ctx; ctx = NULL; taosArrayPush(colCtxs, &colCtx); } @@ -2272,17 +2273,17 @@ int32_t filterMergeTwoGroups(SFilterInfo *info, SFilterGroupCtx** gRes1, SFilter int32_t ctxSize = (int32_t)taosArrayGetSize(colCtxs); SFilterColCtx *pctx = NULL; - + for (int32_t i = 0; i < ctxSize; ++i) { pctx = taosArrayGet(colCtxs, i); colInfo = &(*gRes1)->colInfo[pctx->colIdx]; - + filterFreeColInfo(colInfo); FILTER_PUSH_CTX((*gRes1)->colInfo[pctx->colIdx], pctx->ctx); } taosArrayDestroy(colCtxs); - + return TSDB_CODE_SUCCESS; _return: @@ -2309,7 +2310,7 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t *gR taosSort(gRes, *gResNum, POINTER_BYTES, filterCompareGroupCtx); int32_t pEnd = 0, cStart = 0, cEnd = 0; - uint32_t pColNum = 0, cColNum = 0; + uint32_t pColNum = 0, cColNum = 0; int32_t movedNum = 0; bool all = false; @@ -2335,7 +2336,7 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t *gR if (n < ((*gResNum) - 1)) { memmove(&gRes[n], &gRes[n+1], (*gResNum-n-1) * POINTER_BYTES); } - + --cEnd; --(*gResNum); ++movedNum; @@ -2351,12 +2352,12 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t *gR filterMergeTwoGroups(info, &gRes[m], &gRes[n], &all); FLT_CHK_JMP(all); - + if (gRes[n] == NULL) { if (n < ((*gResNum) - 1)) { memmove(&gRes[n], &gRes[n+1], (*gResNum-n-1) * POINTER_BYTES); } - + --cEnd; --(*gResNum); ++movedNum; @@ -2373,13 +2374,13 @@ int32_t filterMergeGroups(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t *gR if (i >= (*gResNum)) { break; } - + cStart = i; - cColNum = gRes[i]->colNum; + cColNum = gRes[i]->colNum; } return TSDB_CODE_SUCCESS; - + _return: FILTER_SET_FLAG(info->status, FI_STATUS_ALL); @@ -2410,11 +2411,11 @@ int32_t filterRewrite(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t gResNum qDebug("no need rewrite"); return TSDB_CODE_SUCCESS; } - + SFilterInfo oinfo = *info; FILTER_SET_FLAG(oinfo.status, FI_STATUS_CLONED); - + SArray* group = taosArrayInit(FILTER_DEFAULT_GROUP_SIZE, sizeof(SFilterGroup)); SFilterGroupCtx *res = NULL; SFilterColInfo *colInfo = NULL; @@ -2422,7 +2423,7 @@ int32_t filterRewrite(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t gResNum uint32_t uidx = 0; memset(info, 0, sizeof(*info)); - + info->colRangeNum = oinfo.colRangeNum; info->colRange = oinfo.colRange; oinfo.colRangeNum = 0; @@ -2438,25 +2439,25 @@ int32_t filterRewrite(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t gResNum optr = (res->colNum > 1) ? 
LOGIC_COND_TYPE_AND : LOGIC_COND_TYPE_OR; SFilterGroup ng = {0}; - + for (uint32_t m = 0; m < res->colNum; ++m) { colInfo = &res->colInfo[res->colIdx[m]]; if (FILTER_NO_MERGE_DATA_TYPE(colInfo->dataType)) { assert(colInfo->type == RANGE_TYPE_UNIT); int32_t usize = (int32_t)taosArrayGetSize((SArray *)colInfo->info); - + for (int32_t n = 0; n < usize; ++n) { SFilterUnit* u = taosArrayGetP((SArray *)colInfo->info, n); - + filterAddUnitFromUnit(info, &oinfo, u, &uidx); filterAddUnitToGroup(&ng, uidx); } - + continue; } - + assert(colInfo->type == RANGE_TYPE_MR_CTX); - + filterAddGroupUnitFromCtx(info, &oinfo, colInfo->info, res->colIdx[m], &ng, optr, group); } @@ -2497,7 +2498,7 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx** gRes, int32_ } assert(idxNum[i] == gResNum); - + if (idxs == NULL) { idxs = taosMemoryCalloc(info->fields[FLD_TYPE_COLUMN].num, sizeof(*idxs)); } @@ -2536,7 +2537,7 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx** gRes, int32_ if (all) { filterFreeRangeCtx(info->colRange[m]); info->colRange[m] = NULL; - + if (m < (info->colRangeNum - 1)) { memmove(&info->colRange[m], &info->colRange[m + 1], (info->colRangeNum - m - 1) * POINTER_BYTES); memmove(&idxs[m], &idxs[m + 1], (info->colRangeNum - m - 1) * sizeof(*idxs)); @@ -2545,10 +2546,10 @@ int32_t filterGenerateColRange(SFilterInfo *info, SFilterGroupCtx** gRes, int32_ --info->colRangeNum; --m; - FLT_CHK_JMP(info->colRangeNum <= 0); + FLT_CHK_JMP(info->colRangeNum <= 0); } - ++n; + ++n; break; } } @@ -2588,7 +2589,7 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { info->cunits[i].optr = FILTER_UNIT_OPTR(unit); info->cunits[i].colData = NULL; info->cunits[i].colId = FILTER_UNIT_COL_ID(info, unit); - + if (unit->right.type == FLD_TYPE_VALUE) { info->cunits[i].valData = FILTER_UNIT_VAL_DATA(info, unit); } else { @@ -2599,11 +2600,11 @@ int32_t filterGenerateComInfo(SFilterInfo *info) { } else { info->cunits[i].valData2 = info->cunits[i].valData; } - + info->cunits[i].dataSize = FILTER_UNIT_COL_SIZE(info, unit); info->cunits[i].dataType = FILTER_UNIT_DATA_TYPE(unit); } - + return TSDB_CODE_SUCCESS; } @@ -2623,7 +2624,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 int32_t rmUnit = 0; memset(info->blkUnitRes, 0, sizeof(*info->blkUnitRes) * info->unitNum); - + for (uint32_t k = 0; k < info->unitNum; ++k) { int32_t index = -1; SFilterComUnit *cunit = &info->cunits[k]; @@ -2662,7 +2663,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 rmUnit = 1; continue; } - + info->blkUnitRes[k] = -1; rmUnit = 1; continue; @@ -2683,7 +2684,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 if (cunit->dataType == TSDB_DATA_TYPE_FLOAT) { minv = (float)(*(double *)(&pDataBlockst->min)); maxv = (float)(*(double *)(&pDataBlockst->max)); - + minVal = &minv; maxVal = &maxv; } else { @@ -2707,7 +2708,7 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 if (minRes && maxRes) { continue; } - + info->blkUnitRes[k] = -1; rmUnit = 1; } @@ -2726,10 +2727,10 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 info->blkUnitRes[k] = -1; rmUnit = 1; } - + continue; } - + info->blkUnitRes[k] = -1; rmUnit = 1; } @@ -2743,17 +2744,17 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 } info->blkGroupNum = info->groupNum; - + uint32_t *unitNum = info->blkUnits; uint32_t *unitIdx = unitNum + 1; int32_t all = 0, empty 
= 0; - + for (uint32_t g = 0; g < info->groupNum; ++g) { SFilterGroup *group = &info->groups[g]; *unitNum = group->unitNum; - all = 0; + all = 0; empty = 0; - + for (uint32_t u = 0; u < group->unitNum; ++u) { uint32_t uidx = group->unitIdxs[u]; if (info->blkUnitRes[uidx] == 1) { @@ -2772,14 +2773,14 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3 if (*unitNum == 0) { --info->blkGroupNum; assert(empty || all); - + if (empty) { FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_EMPTY); } else { FILTER_SET_FLAG(info->blkFlag, FI_STATUS_BLK_ALL); goto _return; } - + continue; } @@ -2807,18 +2808,18 @@ bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p, if (*p == NULL) { *p = taosMemoryCalloc(numOfRows, sizeof(int8_t)); } - + for (int32_t i = 0; i < numOfRows; ++i) { //FILTER_UNIT_CLR_F(info); unitIdx = info->blkUnits; - + for (uint32_t g = 0; g < info->blkGroupNum; ++g) { uint32_t unitNum = *(unitIdx++); for (uint32_t u = 0; u < unitNum; ++u) { SFilterComUnit *cunit = &info->cunits[*(unitIdx + u)]; void *colData = colDataGetData((SColumnInfoData *)cunit->colData, i); - + //if (FILTER_UNIT_GET_F(info, uidx)) { // p[i] = FILTER_UNIT_GET_R(info, uidx); //} else { @@ -2836,7 +2837,7 @@ bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p, } else { (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData); } - + //FILTER_UNIT_SET_R(info, uidx, p[i]); //FILTER_UNIT_SET_F(info, uidx); } @@ -2855,7 +2856,7 @@ bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p, if ((*p)[i] == 0) { all = false; - } + } } return all; @@ -2864,11 +2865,11 @@ bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p, int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols, bool* all) { - if (statis && numOfRows >= FILTER_RM_UNIT_MIN_ROWS) { + if (statis && numOfRows >= FILTER_RM_UNIT_MIN_ROWS) { info->blkFlag = 0; - + filterRmUnitByRange(info, statis, numOfCols, numOfRows); - + if (info->blkFlag) { if (FILTER_GET_FLAG(info->blkFlag, FI_STATUS_BLK_ALL)) { *all = true; @@ -2879,7 +2880,7 @@ int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, int8_t* } assert(info->unitNum > 1); - + *all = filterExecuteBasedOnStatisImpl(info, numOfRows, p, statis, numOfCols); goto _return; @@ -2890,7 +2891,7 @@ int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, int8_t* _return: info->blkFlag = 0; - + return TSDB_CODE_SUCCESS; } @@ -2912,7 +2913,7 @@ static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, if (*p == NULL) { *p = taosMemoryCalloc(numOfRows, sizeof(int8_t)); } - + for (int32_t i = 0; i < numOfRows; ++i) { uint32_t uidx = info->groups[0].unitIdxs[0]; void *colData = colDataGetData((SColumnInfoData *)info->cunits[uidx].colData, i); @@ -2920,7 +2921,7 @@ static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, if ((*p)[i] == 0) { all = false; - } + } } return all; @@ -2936,7 +2937,7 @@ static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows if (*p == NULL) { *p = taosMemoryCalloc(numOfRows, sizeof(int8_t)); } - + for (int32_t i = 0; i < numOfRows; ++i) { uint32_t uidx = info->groups[0].unitIdxs[0]; void *colData = colDataGetData((SColumnInfoData *)info->cunits[uidx].colData, i); @@ -2966,8 +2967,8 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t** p, SColumnD if 
(*p == NULL) { *p = taosMemoryCalloc(numOfRows, sizeof(int8_t)); } - - for (int32_t i = 0; i < numOfRows; ++i) { + + for (int32_t i = 0; i < numOfRows; ++i) { void *colData = colDataGetData((SColumnInfoData *)info->cunits[0].colData, i); SColumnInfoData* pData = info->cunits[0].colData; if (colData == NULL || colDataIsNull_s(pData, i)) { @@ -2976,7 +2977,7 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t** p, SColumnD } (*p)[i] = (*rfunc)(colData, colData, valData, valData2, func); - + if ((*p)[i] == 0) { all = false; } @@ -2992,11 +2993,11 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDa if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) { return all; } - + if (*p == NULL) { *p = taosMemoryCalloc(numOfRows, sizeof(int8_t)); } - + for (int32_t i = 0; i < numOfRows; ++i) { uint32_t uidx = info->groups[0].unitIdxs[0]; void *colData = colDataGetData((SColumnInfoData *)info->cunits[uidx].colData, i); @@ -3041,17 +3042,17 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAg if (*p == NULL) { *p = taosMemoryCalloc(numOfRows, sizeof(int8_t)); } - + for (int32_t i = 0; i < numOfRows; ++i) { //FILTER_UNIT_CLR_F(info); - + for (uint32_t g = 0; g < info->groupNum; ++g) { SFilterGroup *group = &info->groups[g]; for (uint32_t u = 0; u < group->unitNum; ++u) { uint32_t uidx = group->unitIdxs[u]; SFilterComUnit *cunit = &info->cunits[uidx]; void *colData = colDataGetData((SColumnInfoData *)(cunit->colData), i); - + //if (FILTER_UNIT_GET_F(info, uidx)) { // p[i] = FILTER_UNIT_GET_R(info, uidx); //} else { @@ -3081,7 +3082,7 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAg (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData); } } - + //FILTER_UNIT_SET_R(info, uidx, p[i]); //FILTER_UNIT_SET_F(info, uidx); } @@ -3098,7 +3099,7 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAg if ((*p)[i] == 0) { all = false; - } + } } return all; @@ -3132,11 +3133,11 @@ int32_t filterSetExecFunc(SFilterInfo *info) { if (info->cunits[0].rfunc >= 0) { info->func = filterExecuteImplRange; - return TSDB_CODE_SUCCESS; + return TSDB_CODE_SUCCESS; } info->func = filterExecuteImplMisc; - return TSDB_CODE_SUCCESS; + return TSDB_CODE_SUCCESS; } @@ -3144,7 +3145,7 @@ int32_t filterSetExecFunc(SFilterInfo *info) { int32_t filterPreprocess(SFilterInfo *info) { SFilterGroupCtx** gRes = taosMemoryCalloc(info->groupNum, sizeof(SFilterGroupCtx *)); int32_t gResNum = 0; - + filterMergeGroupUnits(info, gRes, &gResNum); filterMergeGroups(info, gRes, &gResNum); @@ -3154,11 +3155,11 @@ int32_t filterPreprocess(SFilterInfo *info) { goto _return; } - + if (FILTER_GET_FLAG(info->status, FI_STATUS_EMPTY)) { fltInfo("Final - FilterInfo: [EMPTY]"); goto _return; - } + } filterGenerateColRange(info, gRes, gResNum); @@ -3179,7 +3180,7 @@ _return: } taosMemoryFreeClear(gRes); - + return TSDB_CODE_SUCCESS; } @@ -3207,7 +3208,7 @@ int32_t fltSetColFieldDataImpl(SFilterInfo *info, void *param, filer_get_col_fro int32_t fltInitFromNode(SNode* tree, SFilterInfo *info, uint32_t options) { int32_t code = TSDB_CODE_SUCCESS; - + SArray* group = taosArrayInit(FILTER_DEFAULT_GROUP_SIZE, sizeof(SFilterGroup)); filterInitUnitsFields(info); @@ -3225,13 +3226,13 @@ int32_t fltInitFromNode(SNode* tree, SFilterInfo *info, uint32_t options) { filterDumpInfoToString(info, "Before preprocess", 0); FLT_ERR_JRET(filterPreprocess(info)); - + 
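/*
 * A minimal sketch of the goto-based error handling used throughout this
 * file. The real macro definitions are not part of this diff; the expansions
 * below are assumptions for illustration only:
 *
 *   #define FLT_ERR_RET(c)  do { int32_t _c = (c); if (TSDB_CODE_SUCCESS != _c) { return _c; } } while (0)
 *   #define FLT_ERR_JRET(c) do { code = (c); if (TSDB_CODE_SUCCESS != code) { goto _return; } } while (0)
 *   #define FLT_CHK_JMP(c)  do { if (c) { goto _return; } } while (0)
 *
 * Every FLT_ERR_JRET/FLT_CHK_JMP in fltInitFromNode therefore funnels a
 * failure to the function's shared _return cleanup label instead of
 * returning mid-way with partially built state.
 */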
FLT_CHK_JMP(FILTER_GET_FLAG(info->status, FI_STATUS_ALL)); if (FILTER_GET_FLAG(info->status, FI_STATUS_EMPTY)) { return code; } - } + } info->unitRes = taosMemoryMalloc(info->unitNum * sizeof(*info->unitRes)); info->unitFlags = taosMemoryMalloc(info->unitNum * sizeof(*info->unitFlags)); @@ -3252,10 +3253,10 @@ bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t if (FILTER_ALL_RES(info)) { return true; } - + bool ret = true; void *minVal, *maxVal; - + for (uint32_t k = 0; k < info->colRangeNum; ++k) { int32_t index = -1; SFilterRangeCtx *ctx = info->colRange[k]; @@ -3305,7 +3306,7 @@ bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t if (ctx->type == TSDB_DATA_TYPE_FLOAT) { minv = (float)(*(double *)(&pDataBlockst->min)); maxv = (float)(*(double *)(&pDataBlockst->max)); - + minVal = &minv; maxVal = &maxv; } else { @@ -3320,7 +3321,7 @@ bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg *pDataStatis, int32_t } r = r->next; } - + if (!ret) { return ret; } @@ -3356,10 +3357,10 @@ int32_t filterGetTimeRangeImpl(SFilterInfo *info, STimeWindow *win, bool * SFilterUnit *unit = &info->units[uidx]; uint8_t raOptr = FILTER_UNIT_OPTR(unit); - + filterAddRangeOptr(cur, raOptr, LOGIC_COND_TYPE_AND, &empty, NULL); FLT_CHK_JMP(empty); - + if (FILTER_NO_MERGE_OPTR(raOptr)) { continue; } @@ -3392,10 +3393,10 @@ int32_t filterGetTimeRangeImpl(SFilterInfo *info, STimeWindow *win, bool * *isStrict = false; qDebug("more than one time range, num:%d", num); } - + SFilterRange tra; filterGetRangeRes(prev, &tra); - win->skey = tra.s; + win->skey = tra.s; win->ekey = tra.e; if (FILTER_GET_FLAG(tra.sflag, RANGE_FLG_EXCLUDE)) { win->skey++; @@ -3427,7 +3428,7 @@ _return: int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) { SFilterInfo *info = NULL; int32_t code = 0; - + *isStrict = true; FLT_ERR_RET(filterInitFromNode(pNode, &info, FLT_OPTION_NO_REWRITE|FLT_OPTION_TIMESTAMP)); @@ -3452,7 +3453,7 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar if (FILTER_EMPTY_RES(info) || FILTER_ALL_RES(info)) { return TSDB_CODE_SUCCESS; } - + for (uint32_t i = 0; i < info->fields[FLD_TYPE_COLUMN].num; ++i) { SFilterField* fi = &info->fields[FLD_TYPE_COLUMN].fields[i]; int32_t type = FILTER_GET_COL_FIELD_TYPE(fi); @@ -3484,7 +3485,7 @@ int32_t filterConverNcharColumns(SFilterInfo* info, int32_t rows, bool *gotNchar } fi->data = nfi.data; - + *gotNchar = true; } } @@ -3535,11 +3536,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { stat->code = TSDB_CODE_QRY_INVALID_INPUT; return DEAL_RES_ERROR; } - + if ((QUERY_NODE_OPERATOR != nodeType(cell->pNode)) && (QUERY_NODE_LOGIC_CONDITION != nodeType(cell->pNode))) { stat->scalarMode = true; } - + cell = cell->pNext; } @@ -3552,11 +3553,11 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { stat->scalarMode = true; return DEAL_RES_CONTINUE; } - + if (!FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP)) { return DEAL_RES_CONTINUE; } - + if (TSDB_DATA_TYPE_BINARY != valueNode->node.resType.type && TSDB_DATA_TYPE_NCHAR != valueNode->node.resType.type) { return DEAL_RES_CONTINUE; } @@ -3567,7 +3568,7 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { stat->code = code; return DEAL_RES_ERROR; } - + return DEAL_RES_CONTINUE; } @@ -3585,7 +3586,7 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { stat->precision = colNode->node.resType.precision; return DEAL_RES_CONTINUE; } - + if (QUERY_NODE_NODE_LIST == 
nodeType(*pNode)) { SNodeListNode *listNode = (SNodeListNode *)*pNode; if (QUERY_NODE_VALUE != nodeType(listNode->pNodeList->pHead->pNode)) { @@ -3623,7 +3624,7 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { return DEAL_RES_CONTINUE; } - if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && + if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && (node->opType >= OP_TYPE_NOT_EQUAL) && (node->opType != OP_TYPE_IS_NULL && node->opType != OP_TYPE_IS_NOT_NULL)) { stat->scalarMode = true; return DEAL_RES_CONTINUE; @@ -3635,7 +3636,7 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { stat->code = TSDB_CODE_QRY_APP_ERROR; return DEAL_RES_ERROR; } - + if (QUERY_NODE_COLUMN != nodeType(node->pLeft)) { stat->scalarMode = true; return DEAL_RES_CONTINUE; @@ -3655,7 +3656,7 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { if ((QUERY_NODE_COLUMN != nodeType(node->pRight)) && (QUERY_NODE_VALUE != nodeType(node->pRight)) && (QUERY_NODE_NODE_LIST != nodeType(node->pRight))) { stat->scalarMode = true; return DEAL_RES_CONTINUE; - } + } if (nodeType(node->pLeft) == nodeType(node->pRight)) { stat->scalarMode = true; @@ -3698,7 +3699,7 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { if (OP_TYPE_IN != node->opType) { SColumnNode *refNode = (SColumnNode *)node->pLeft; SValueNode *valueNode = (SValueNode *)node->pRight; - if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) + if (FILTER_GET_FLAG(stat->info->options, FLT_OPTION_TIMESTAMP) && TSDB_DATA_TYPE_UBIGINT == valueNode->node.resType.type && valueNode->datum.u <= INT64_MAX) { valueNode->node.resType.type = TSDB_DATA_TYPE_BIGINT; } @@ -3719,12 +3720,12 @@ EDealRes fltReviseRewriter(SNode** pNode, void* pContext) { } return DEAL_RES_CONTINUE; - } - + } + fltError("invalid node type for filter, type:%d", nodeType(*pNode)); - + stat->code = TSDB_CODE_QRY_INVALID_INPUT; - + return DEAL_RES_ERROR; } @@ -3737,7 +3738,7 @@ int32_t fltReviseNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) { int32_t nodeNum = taosArrayGetSize(pStat->nodeList); for (int32_t i = 0; i < nodeNum; ++i) { SValueNode *valueNode = *(SValueNode **)taosArrayGet(pStat->nodeList, i); - + FLT_ERR_JRET(sclConvertToTsValueNode(pStat->precision, valueNode)); } @@ -3756,7 +3757,7 @@ int32_t fltOptimizeNodes(SFilterInfo *pInfo, SNode** pNode, SFltTreeStat *pStat) int32_t fltGetDataFromColId(void *param, int32_t id, void **data) { int32_t numOfCols = ((SFilterColumnParam *)param)->numOfCols; SArray* pDataBlock = ((SFilterColumnParam *)param)->pDataBlock; - + for (int32_t j = 0; j < numOfCols; ++j) { SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, j); if (id == pColInfo->info.colId) { @@ -3775,7 +3776,7 @@ int32_t fltGetDataFromSlotId(void *param, int32_t id, void **data) { fltError("invalid slot id, id:%d, numOfCols:%d, arraySize:%d", id, numOfCols, (int32_t)taosArrayGetSize(pDataBlock)); return TSDB_CODE_QRY_APP_ERROR; } - + SColumnInfoData* pColInfo = taosArrayGet(pDataBlock, id); *data = pColInfo; @@ -3801,7 +3802,7 @@ int32_t filterSetDataFromColId(SFilterInfo *info, void *param) { int32_t filterInitFromNode(SNode* pNode, SFilterInfo **pInfo, uint32_t options) { int32_t code = 0; SFilterInfo *info = NULL; - + if (pNode == NULL || pInfo == NULL) { fltError("invalid param"); FLT_ERR_RET(TSDB_CODE_QRY_APP_ERROR); @@ -3821,7 +3822,7 @@ int32_t filterInitFromNode(SNode* pNode, SFilterInfo **pInfo, uint32_t options) SFltTreeStat stat = {0}; stat.precision = -1; stat.info = info; - + 
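/*
 * Caller-side sketch of the public API assembled in this file (hedged: the
 * trailing parameters of filterExecute and the SFilterColumnParam wiring are
 * assumptions, since this diff does not show them in full):
 *
 *   SFilterInfo *info = NULL;
 *   int8_t *rowRes = NULL;
 *   FLT_ERR_RET(filterInitFromNode(pWhereNode, &info, 0));    // build fields/units/groups from the expr tree
 *   SFilterColumnParam param = {.numOfCols = numOfCols, .pDataBlock = pDataBlock};
 *   filterSetDataFromColId(info, &param);                     // bind column data by column id
 *   bool allRowsPass = filterExecute(info, pBlock, &rowRes, NULL, numOfCols);
 *   filterFreeInfo(info);                                     // releases fields, units and groups
 *
 * filterExecute reporting true means every row qualified, in which case the
 * per-row result array need not be consulted.
 */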
FLT_ERR_JRET(fltReviseNodes(info, &pNode, &stat)); info->scalarMode = stat.scalarMode; @@ -3833,11 +3834,11 @@ int32_t filterInitFromNode(SNode* pNode, SFilterInfo **pInfo, uint32_t options) info->sclCtx.node = pNode; FLT_ERR_JRET(fltOptimizeNodes(info, &info->sclCtx.node, &stat)); } - + return code; _return: - + filterFreeInfo(*pInfo); *pInfo = NULL; @@ -3877,4 +3878,228 @@ bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, int8_t** p, SColumnData } +typedef struct SClassifyConditionCxt { + bool hasPrimaryKey; + bool hasTagIndexCol; + bool hasTagCol; + bool hasOtherCol; +} SClassifyConditionCxt; + +static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) { + SClassifyConditionCxt* pCxt = (SClassifyConditionCxt*)pContext; + if (QUERY_NODE_COLUMN == nodeType(pNode)) { + SColumnNode* pCol = (SColumnNode*)pNode; + if (PRIMARYKEY_TIMESTAMP_COL_ID == pCol->colId && TSDB_SYSTEM_TABLE != pCol->tableType) { + pCxt->hasPrimaryKey = true; + } else if (pCol->hasIndex) { + pCxt->hasTagIndexCol = true; + pCxt->hasTagCol = true; + } else if (COLUMN_TYPE_TAG == pCol->colType || COLUMN_TYPE_TBNAME == pCol->colType) { + pCxt->hasTagCol = true; + } else { + pCxt->hasOtherCol = true; + } + } else if (QUERY_NODE_FUNCTION == nodeType(pNode)) { + SFunctionNode* pFunc = (SFunctionNode*)pNode; + if (fmIsPseudoColumnFunc(pFunc->funcId)) { + if (FUNCTION_TYPE_TBNAME==pFunc->funcType) { + pCxt->hasTagCol = true; + } else { + pCxt->hasOtherCol = true; + } + } + } + return DEAL_RES_CONTINUE; +} + +typedef enum EConditionType { + COND_TYPE_PRIMARY_KEY = 1, + COND_TYPE_TAG_INDEX, + COND_TYPE_TAG, + COND_TYPE_NORMAL +} EConditionType; + +static EConditionType classifyCondition(SNode* pNode) { + SClassifyConditionCxt cxt = {.hasPrimaryKey = false, .hasTagIndexCol = false, .hasOtherCol = false}; + nodesWalkExpr(pNode, classifyConditionImpl, &cxt); + return cxt.hasOtherCol ? COND_TYPE_NORMAL + : (cxt.hasPrimaryKey && cxt.hasTagCol + ? COND_TYPE_NORMAL + : (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY + : (cxt.hasTagIndexCol ? 
COND_TYPE_TAG_INDEX : COND_TYPE_TAG))); +} + +static bool isCondColumnsFromMultiTable(SNode* pCond) { + SNodeList* pCondCols = nodesMakeList(); + int32_t code = nodesCollectColumnsFromNode(pCond, NULL, COLLECT_COL_TYPE_ALL, &pCondCols); + if (code == TSDB_CODE_SUCCESS) { + if (LIST_LENGTH(pCondCols) >= 2) { + SColumnNode* pFirstCol = (SColumnNode*)nodesListGetNode(pCondCols, 0); + SNode* pColNode = NULL; + FOREACH(pColNode, pCondCols) { + if (strcmp(((SColumnNode*)pColNode)->dbName, pFirstCol->dbName) != 0 || + strcmp(((SColumnNode*)pColNode)->tableAlias, pFirstCol->tableAlias) != 0) { + nodesDestroyList(pCondCols); + return true; + } + } + } + nodesDestroyList(pCondCols); + } + return false; +} +static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, + SNode** pOtherCond) { + SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*pCondition); + + int32_t code = TSDB_CODE_SUCCESS; + + SNodeList* pPrimaryKeyConds = NULL; + SNodeList* pTagIndexConds = NULL; + SNodeList* pTagConds = NULL; + SNodeList* pOtherConds = NULL; + SNode* pCond = NULL; + FOREACH(pCond, pLogicCond->pParameterList) { + if (isCondColumnsFromMultiTable(pCond)) { + if (NULL != pOtherCond) { + code = nodesListMakeAppend(&pOtherConds, nodesCloneNode(pCond)); + } + } else { + switch (classifyCondition(pCond)) { + case COND_TYPE_PRIMARY_KEY: + if (NULL != pPrimaryKeyCond) { + code = nodesListMakeAppend(&pPrimaryKeyConds, nodesCloneNode(pCond)); + } + break; + case COND_TYPE_TAG_INDEX: + if (NULL != pTagIndexCond) { + code = nodesListMakeAppend(&pTagIndexConds, nodesCloneNode(pCond)); + } + if (NULL != pTagCond) { + code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); + } + break; + case COND_TYPE_TAG: + if (NULL != pTagCond) { + code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond)); + } + break; + case COND_TYPE_NORMAL: + default: + if (NULL != pOtherCond) { + code = nodesListMakeAppend(&pOtherConds, nodesCloneNode(pCond)); + } + break; + } + } + if (TSDB_CODE_SUCCESS != code) { + break; + } + } + + SNode* pTempPrimaryKeyCond = NULL; + SNode* pTempTagIndexCond = NULL; + SNode* pTempTagCond = NULL; + SNode* pTempOtherCond = NULL; + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempTagIndexCond, &pTagIndexConds); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempTagCond, &pTagConds); + } + if (TSDB_CODE_SUCCESS == code) { + code = nodesMergeConds(&pTempOtherCond, &pOtherConds); + } + + if (TSDB_CODE_SUCCESS == code) { + if (NULL != pPrimaryKeyCond) { + *pPrimaryKeyCond = pTempPrimaryKeyCond; + } + if (NULL != pTagIndexCond) { + *pTagIndexCond = pTempTagIndexCond; + } + if (NULL != pTagCond) { + *pTagCond = pTempTagCond; + } + if (NULL != pOtherCond) { + *pOtherCond = pTempOtherCond; + } + nodesDestroyNode(*pCondition); + *pCondition = NULL; + } else { + nodesDestroyList(pPrimaryKeyConds); + nodesDestroyList(pTagIndexConds); + nodesDestroyList(pTagConds); + nodesDestroyList(pOtherConds); + nodesDestroyNode(pTempPrimaryKeyCond); + nodesDestroyNode(pTempTagIndexCond); + nodesDestroyNode(pTempTagCond); + nodesDestroyNode(pTempOtherCond); + } + + return code; +} + +int32_t filterPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond, + SNode** pOtherCond) { + if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCondition) && + LOGIC_COND_TYPE_AND == 
((SLogicConditionNode*)*pCondition)->condType) { + return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagIndexCond, pTagCond, pOtherCond); + } + + bool needOutput = false; + if (isCondColumnsFromMultiTable(*pCondition)) { + if (NULL != pOtherCond) { + *pOtherCond = *pCondition; + needOutput = true; + } + } else { + switch (classifyCondition(*pCondition)) { + case COND_TYPE_PRIMARY_KEY: + if (NULL != pPrimaryKeyCond) { + *pPrimaryKeyCond = *pCondition; + needOutput = true; + } + break; + case COND_TYPE_TAG_INDEX: + if (NULL != pTagIndexCond) { + *pTagIndexCond = *pCondition; + needOutput = true; + } + if (NULL != pTagCond) { + SNode *pTempCond = *pCondition; + if (NULL != pTagIndexCond) { + pTempCond = nodesCloneNode(*pCondition); + if (NULL == pTempCond) { + return TSDB_CODE_OUT_OF_MEMORY; + } + } + *pTagCond = pTempCond; + needOutput = true; + } + break; + case COND_TYPE_TAG: + if (NULL != pTagCond) { + *pTagCond = *pCondition; + needOutput = true; + } + break; + case COND_TYPE_NORMAL: + default: + if (NULL != pOtherCond) { + *pOtherCond = *pCondition; + needOutput = true; + } + break; + } + } + if (needOutput) { + *pCondition = NULL; + } + + return TSDB_CODE_SUCCESS; +} diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c index cfeac04bbd4166b8ad55102bd844b001c3138e78..d0c5a76f4b03e38851e5810b2143fa9b65cb782f 100644 --- a/source/libs/scalar/src/scalar.c +++ b/source/libs/scalar/src/scalar.c @@ -140,13 +140,23 @@ int32_t scalarGenerateSetFromList(void **data, void *pNode, uint32_t type) { SCL_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY); } + colDataDestroy(out.columnData); + taosMemoryFreeClear(out.columnData); + out.columnData = taosMemoryCalloc(1, sizeof(SColumnInfoData)); + cell = cell->pNext; } *data = pObj; + + colDataDestroy(out.columnData); + taosMemoryFreeClear(out.columnData); return TSDB_CODE_SUCCESS; _return: + + colDataDestroy(out.columnData); + taosMemoryFreeClear(out.columnData); taosHashCleanup(pObj); SCL_RET(code); } diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c index 46140ccdff58eaa79863a4685112dfc64f853ff4..82146c374185d7569c7fe6ac2344e95e302e1391 100644 --- a/source/libs/scheduler/src/schJob.c +++ b/source/libs/scheduler/src/schJob.c @@ -278,7 +278,7 @@ int32_t schValidateAndBuildJob(SQueryPlan *pDag, SSchJob *pJob) { } SHashObj *planToTask = taosHashInit( - SCHEDULE_DEFAULT_MAX_TASK_NUM, + pDag->numOfSubplans, taosGetDefaultHashFunction(POINTER_BYTES == sizeof(int64_t) ? 
TSDB_DATA_TYPE_BIGINT : TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK); if (NULL == planToTask) { diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c index 3bec91226c2e59dcaedc0dde04f957703462610f..544ecce175bec5b3ccbd2654210db6d51e3e46e9 100644 --- a/source/libs/scheduler/src/schRemote.c +++ b/source/libs/scheduler/src/schRemote.c @@ -92,7 +92,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa switch (msgType) { case TDMT_VND_COMMIT_RSP: { SCH_ERR_JRET(rspCode); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_CREATE_TABLE_RSP: { @@ -118,7 +118,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa SCH_ERR_JRET(rspCode); taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_DROP_TABLE_RSP: { @@ -144,7 +144,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa SCH_ERR_JRET(rspCode); taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_ALTER_TABLE_RSP: { @@ -169,7 +169,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } case TDMT_VND_SUBMIT_RSP: { @@ -218,7 +218,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -238,7 +238,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -263,7 +263,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa taosMemoryFreeClear(msg); - SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask)); + SCH_ERR_JRET(schProcessOnTaskSuccess(pJob, pTask)); break; } @@ -379,13 +379,15 @@ int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) { qDebug("begin to handle rsp msg, type:%s, handle:%p, code:%s", TMSG_INFO(pMsg->msgType), pMsg->handle, tstrerror(rspCode)); - SCH_ERR_RET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId)); + SCH_ERR_JRET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId)); code = schHandleResponseMsg(pJob, pTask, pParam->execId, pMsg, rspCode); pMsg->pData = NULL; schProcessOnCbEnd(pJob, pTask, code); +_return: + taosMemoryFreeClear(pMsg->pData); qDebug("end to handle rsp msg, type:%s, handle:%p, code:%s", TMSG_INFO(pMsg->msgType), pMsg->handle, @@ -398,6 +400,9 @@ int32_t schHandleDropCallback(void *param, SDataBuf *pMsg, int32_t code) { SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param; qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 " drop task rsp received, code:0x%x", pParam->queryId, pParam->taskId, code); + if (pMsg) { + taosMemoryFree(pMsg->pData); + } return TSDB_CODE_SUCCESS; } @@ -408,6 +413,8 @@ int32_t schHandleLinkBrokenCallback(void *param, SDataBuf *pMsg, int32_t code) { qDebug("handle %p is broken", pMsg->handle); if (head->isHbParam) { + taosMemoryFree(pMsg->pData); + 
SSchHbCallbackParam *hbParam = (SSchHbCallbackParam *)param; SSchTrans trans = {.pTrans = hbParam->pTrans, .pHandle = NULL}; SCH_ERR_RET(schUpdateHbConnection(&hbParam->nodeEpId, &trans)); diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index 025891a26c86672a6ab2ad27dcce150cebf2ea72..ecd0253e1c7c5c904d3b14bb64eaa124ef00571d 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -424,10 +424,15 @@ int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32 } } - SCH_RET(schDoTaskRedirect(pJob, pTask, pData, rspCode)); + code = schDoTaskRedirect(pJob, pTask, pData, rspCode); + taosMemoryFree(pData->pData); + + SCH_RET(code); _return: + taosMemoryFree(pData->pData); + SCH_RET(schProcessOnTaskFailure(pJob, pTask, code)); } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index ec1dd693e1af740037dd0374958556811ec6180b..95e5b61dbddaf9b6fbfcac45badfa3c62d19da47 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -462,7 +462,8 @@ int32_t streamDispatch(SStreamTask* pTask) { if (streamDispatchAllBlocks(pTask, pBlock) < 0) { ASSERT(0); code = -1; - // TODO set status fail + streamQueueProcessFail(pTask->outputQueue); + atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL); goto FREE; } /*atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);*/ diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index f2a5ba0ab53ce26bbf39db0426830de253e0732b..ff1ef7b4b95ef9d7ae4e4bac1fdeb17b2fb31b2c 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -125,6 +125,9 @@ SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t waterma pInfo->pCloseWinSBF = NULL; _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); pInfo->pMap = taosHashInit(DEFAULT_MAP_CAPACITY, hashFn, true, HASH_NO_LOCK); + pInfo->maxVersion = 0; + pInfo->scanGroupId = 0; + pInfo->scanWindow = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX}; return pInfo; } @@ -185,15 +188,36 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) { } if (ts < pInfo->minTS) { + qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64 , tableId, maxTs, *pMapMaxTs, ts); return true; } else if (res == TSDB_CODE_SUCCESS) { return false; } - qDebug("===stream===bucket:%d, tableId:%" PRIu64 ", maxTs:" PRIu64 ", maxMapTs:" PRIu64 ", ts:%" PRIu64, index, tableId, maxTs, *pMapMaxTs, ts); + qDebug("===stream===Update. 
tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64 , tableId, maxTs, *pMapMaxTs, ts); // check from tsdb api return true; } +void updateInfoSetScanRange(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version) { + qDebug("===stream===groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version); + pInfo->scanWindow = *pWin; + pInfo->scanGroupId = groupId; + pInfo->maxVersion = version; +} + +bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, uint64_t version) { + if (!pInfo) { + return false; + } + qDebug("===stream===check groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version); + if (pInfo->scanGroupId == groupId && pInfo->scanWindow.skey <= pWin->skey && + pWin->ekey <= pInfo->scanWindow.ekey && version <= pInfo->maxVersion ) { + qDebug("===stream===ignore groupId:%" PRIu64 ", startTs:%" PRIu64 ", endTs:%" PRIu64 ", version:%" PRIu64 , groupId, pWin->skey, pWin->ekey, version); + return true; + } + return false; +} + void updateInfoDestroy(SUpdateInfo *pInfo) { if (pInfo == NULL) { return; diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 586cfc0f15a00fdb567186a40f7b215842a5c927..bc3275a9714f5db4f7866b18153953a1881bf8cb 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -191,8 +191,10 @@ int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode); int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms); int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode); int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode); // utils -------------- int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg); diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index 7d0f53640cc0a6e441c31dd3c52166c824dae4d6..0f63510b12f26f4ff867599afb274a6cea276883 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -790,65 +790,6 @@ int32_t syncNodeOnAppendEntriesSnapshotCb(SSyncNode* ths, SyncAppendEntries* pMs } } while (0); -#if 0 - // fake match - // - // condition1: - // I have snapshot, no log, preIndex > myLastIndex - // - // condition2: - // I have snapshot, have log, log <= snapshot, preIndex > myLastIndex - // - // condition3: - // I have snapshot, preIndex < snapshot.lastApplyIndex - // - // condition4: - // I have snapshot, preIndex == snapshot.lastApplyIndex, no data - // - // operation: - // match snapshot.lastApplyIndex - 1; - // no operation on log - do { - SyncIndex myLastIndex = syncNodeGetLastIndex(ths); - SSnapshot snapshot; - ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot); - - bool condition0 = (pMsg->term == ths->pRaftStore->currentTerm) && (ths->state == TAOS_SYNC_STATE_FOLLOWER) && - syncNodeHasSnapshot(ths); - bool condition1 = - condition0 && (ths->pLogStore->syncLogEntryCount(ths->pLogStore) == 0) && (pMsg->prevLogIndex > myLastIndex); // donot use syncLogEntryCount!!! 
use isEmpty - bool condition2 = condition0 && (ths->pLogStore->syncLogLastIndex(ths->pLogStore) <= snapshot.lastApplyIndex) && - (pMsg->prevLogIndex > myLastIndex); - bool condition3 = condition0 && (pMsg->prevLogIndex < snapshot.lastApplyIndex); - bool condition4 = condition0 && (pMsg->prevLogIndex == snapshot.lastApplyIndex) && (pMsg->dataLen == 0); - bool condition = condition1 || condition2 || condition3 || condition4; - - if (condition) { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "recv sync-append-entries, fake match, pre-index:%" PRId64 ", pre-term:%" PRIu64, - pMsg->prevLogIndex, pMsg->prevLogTerm); - syncNodeEventLog(ths, logBuf); - - // prepare response msg - SyncAppendEntriesReply* pReply = syncAppendEntriesReplyBuild(ths->vgId); - pReply->srcId = ths->myRaftId; - pReply->destId = pMsg->srcId; - pReply->term = ths->pRaftStore->currentTerm; - pReply->privateTerm = ths->pNewNodeReceiver->privateTerm; - pReply->success = true; - pReply->matchIndex = snapshot.lastApplyIndex; - - // send response - SRpcMsg rpcMsg; - syncAppendEntriesReply2RpcMsg(pReply, &rpcMsg); - syncNodeSendMsgById(&pReply->destId, ths, &rpcMsg); - syncAppendEntriesReplyDestroy(pReply); - - return ret; - } - } while (0); -#endif - // fake match // // condition1: diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index bfaa785d0ffc3fdcfd9229a5fef5419b6edd4e9f..7279f0858be7fa7c27406eb194b1b8e1f7c7d601 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -104,6 +104,16 @@ int32_t syncNodeOnAppendEntriesReplyCb(SSyncNode* ths, SyncAppendEntriesReply* p // only start once static void syncNodeStartSnapshotOnce(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex, SyncTerm lastApplyTerm, SyncAppendEntriesReply* pMsg) { + if (beginIndex > endIndex) { + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "snapshot param error, start:%ld, end:%ld", beginIndex, endIndex); + syncNodeErrorLog(ths, logBuf); + } while (0); + + return; + } + // get sender SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(ths, &(pMsg->srcId)); ASSERT(pSender != NULL); @@ -325,10 +335,6 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries // nextIndex' = [nextIndex EXCEPT ![i][j] = m.mmatchIndex + 1] syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), pMsg->matchIndex + 1); - if (gRaftDetailLog) { - sTrace("update next match, index:%" PRId64 ", success:%d", pMsg->matchIndex + 1, pMsg->success); - } - // matchIndex' = [matchIndex EXCEPT ![i][j] = m.mmatchIndex] syncIndexMgrSetIndex(ths->pMatchIndex, &(pMsg->srcId), pMsg->matchIndex); @@ -339,9 +345,6 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries } else { SyncIndex nextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId)); - if (gRaftDetailLog) { - sTrace("update next index not match, begin, index:%" PRId64 ", success:%d", nextIndex, pMsg->success); - } // notice! 
int64, uint64 if (nextIndex > SYNC_INDEX_BEGIN) { @@ -383,9 +386,6 @@ int32_t syncNodeOnAppendEntriesReplySnapshotCb(SSyncNode* ths, SyncAppendEntries } syncIndexMgrSetIndex(ths->pNextIndex, &(pMsg->srcId), nextIndex); - if (gRaftDetailLog) { - sTrace("update next index not match, end, index:%" PRId64 ", success:%d", nextIndex, pMsg->success); - } } SyncIndex afterNextIndex = syncIndexMgrGetIndex(ths->pNextIndex, &(pMsg->srcId)); diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 52cbcd00598347d00ff8f655cd1a3da634d1af31..00ce1f7b685264a05f00f6da07a6b22658b2d3b1 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -88,10 +88,6 @@ int64_t syncOpen(const SSyncInfo* pSyncInfo) { SSyncNode* pSyncNode = syncNodeOpen(pSyncInfo); ASSERT(pSyncNode != NULL); - if (gRaftDetailLog) { - syncNodeLog2("syncNodeOpen open success", pSyncNode); - } - pSyncNode->rid = taosAddRef(tsNodeRefId, pSyncNode); if (pSyncNode->rid < 0) { syncFreeNode(pSyncNode); @@ -245,11 +241,7 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg* pNewCfg) { return -1; } - char* newconfig = syncCfg2Str((SSyncCfg*)pNewCfg); - if (gRaftDetailLog) { - sInfo("==syncReconfig== newconfig:%s", newconfig); - } - + char* newconfig = syncCfg2Str((SSyncCfg*)pNewCfg); int32_t ret = 0; SRpcMsg rpcMsg = {0}; @@ -912,12 +904,6 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) { ASSERT(pSyncNode->pRaftCfg != NULL); pSyncInfo->syncCfg = pSyncNode->pRaftCfg->cfg; - if (gRaftDetailLog) { - char* seralized = raftCfg2Str(pSyncNode->pRaftCfg); - sInfo("syncNodeOpen update config :%s", seralized); - taosMemoryFree(seralized); - } - raftCfgClose(pSyncNode->pRaftCfg); } @@ -1312,6 +1298,17 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) { return ret; } +int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) { + int32_t ret = 0; + if (syncEnvIsStart()) { + taosTmrReset(pSyncNode->FpHeartbeatTimerCB, 1, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pHeartbeatTimer); + atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser); + } else { + sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId); + } + return ret; +} + int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode) { int32_t ret = 0; atomic_add_fetch_64(&pSyncNode->heartbeatTimerLogicClockUser, 1); @@ -1326,18 +1323,17 @@ int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode) { return 0; } +int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode) { + syncNodeStopHeartbeatTimer(pSyncNode); + syncNodeStartNowHeartbeatTimer(pSyncNode); + return 0; +} + // utils -------------- int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg) { SEpSet epSet; syncUtilraftId2EpSet(destRaftId, &epSet); if (pSyncNode->FpSendMsg != NULL) { - if (gRaftDetailLog) { - char* JsonStr = syncRpcMsg2Str(pMsg); - syncUtilJson2Line(JsonStr); - sTrace("sync send msg, vgId:%d, type:%d, msg:%s", pSyncNode->vgId, pMsg->msgType, JsonStr); - taosMemoryFree(JsonStr); - } - // htonl syncUtilMsgHtoN(pMsg->pCont); diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index 1a2a0836774f9fc3c02b1935610d42a0c63bee85..dc7d8c4f52aba28e6902e0e6c3baa0a904003de2 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -140,6 +140,7 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) { ", match-index:%d, raftid:%" PRId64, pSyncNode->vgId, nextIndex, 
newNextIndex, SYNC_INDEX_INVALID, pDestId->addr); + syncNodeRestartNowHeartbeatTimer(pSyncNode); return -1; } diff --git a/source/libs/sync/test/sh/auto_bench.sh b/source/libs/sync/test/sh/auto_bench.sh index 32dc071018a02ebded53f8648a821748ae9d02ca..d786b5f43cf8af7c1ff0d434e2e1f238f520952b 100644 --- a/source/libs/sync/test/sh/auto_bench.sh +++ b/source/libs/sync/test/sh/auto_bench.sh @@ -1,7 +1,7 @@ #!/bin/bash -if [ $# != 5 ] ; then - echo "Uasge: $0 instances vgroups replica ctables rows" +if [ $# != 7 ] ; then + echo "Usage: $0 instances vgroups replica ctables rows weak drop(yes/no)" echo "" exit 1 fi @@ -11,11 +11,14 @@ vgroups=$2 replica=$3 ctables=$4 rows=$5 +weak=$6 +drop=$7 -echo "params: instances:${instances}, vgroups:${vgroups}, replica:${replica}, ctables:${ctables}, rows:${rows}" + +echo "params: instances:${instances}, vgroups:${vgroups}, replica:${replica}, ctables:${ctables}, rows:${rows}, weak:${weak}, drop:${drop}" dt=`date "+%Y-%m-%d-%H-%M-%S"` -casedir=instances_${instances}_vgroups_${vgroups}_replica_${replica}_ctables_${ctables}_rows_${rows}_${dt} +casedir=instances_${instances}_vgroups_${vgroups}_replica_${replica}_ctables_${ctables}_rows_${rows}_weak_${weak}_drop_${drop}_${dt} mkdir ${casedir} cp ./insert.tpl.json ${casedir} cd ${casedir} @@ -25,6 +28,7 @@ for i in `seq 1 ${instances}`;do cfg_file=bench_${i}.json cp ./insert.tpl.json ${cfg_file} rstfile=result_${i} + sed -i 's/tpl_drop_tpl/'${drop}'/g' ${cfg_file} sed -i 's/tpl_vgroups_tpl/'${vgroups}'/g' ${cfg_file} sed -i 's/tpl_replica_tpl/'${replica}'/g' ${cfg_file} sed -i 's/tpl_ctables_tpl/'${ctables}'/g' ${cfg_file} diff --git a/source/libs/sync/test/sh/insert.tpl.json b/source/libs/sync/test/sh/insert.tpl.json index 633dd70a24d7657f8add131ebe8b700830e8cd38..1d952b98e8dd855990d57fcf790a97db4281c1d5 100644 --- a/source/libs/sync/test/sh/insert.tpl.json +++ b/source/libs/sync/test/sh/insert.tpl.json @@ -15,8 +15,10 @@ "databases": [ { "dbinfo": { - "name": "db1", - "drop": "yes", + "name": "db_auto", + "drop": "tpl_drop_tpl", + "wal_retention_period": -1, + "wal_retention_size": -1, "vgroups": tpl_vgroups_tpl, "replica": tpl_replica_tpl }, diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index efbe110f6f224b2740281dadf420b1e551fb4c2c..293e3e3c3569d8c5cf5607327f2ec576bda63009 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -381,8 +381,8 @@ void cliHandleResp(SCliConn* conn) { STraceId* trace = &transMsg.info.traceId; - tGTrace("%s conn %p %s received from %s, local info:%s, msg size:%d, code:0x%x", CONN_GET_INST_LABEL(conn), conn, - TMSG_INFO(pHead->msgType), conn->dst, conn->src, transMsg.contLen, transMsg.code); + tGTrace("%s conn %p %s received from %s, local info:%s, len:%d, code str:%s", CONN_GET_INST_LABEL(conn), conn, + TMSG_INFO(pHead->msgType), conn->dst, conn->src, transMsg.contLen, tstrerror(transMsg.code)); if (pCtx == NULL && CONN_NO_PERSIST_BY_APP(conn)) { tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn); @@ -549,6 +549,8 @@ static void addConnToPool(void* pool, SCliConn* conn) { CONN_CONSTRUCT_HASH_KEY(key, conn->ip, conn->port); tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap); conn->list = taosHashGet((SHashObj*)pool, key, strlen(key)); + } else { + tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap); } assert(conn->list != NULL);
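Aside: the addConnToPool hunk above only touches tracing, but the pattern it sits in is worth spelling out: the client transport keeps a hash map from an "ip:port" key to a list of idle connections, so a finished connection is parked for reuse instead of being re-dialed. The sketch below is a minimal, self-contained illustration of that idea in plain C; SPoolConn, poolGet, poolPut and POOL_BUCKETS are hypothetical names, not the taos hash or transport API.

/* Minimal sketch of a keyed idle-connection pool; illustrative only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POOL_BUCKETS 16

typedef struct SPoolConn {
  char              key[64];  /* "ip:port", like CONN_CONSTRUCT_HASH_KEY builds */
  struct SPoolConn *next;     /* idle conns sharing the same key */
} SPoolConn;

static SPoolConn *gPool[POOL_BUCKETS];  /* zero-initialized bucket heads */

static unsigned poolHash(const char *key) {
  unsigned h = 5381;  /* djb2 string hash */
  for (; *key; ++key) h = h * 33u + (unsigned char)*key;
  return h % POOL_BUCKETS;
}

/* Return an idle conn for ip:port, or NULL so the caller dials a new one. */
static SPoolConn *poolGet(const char *ip, int port) {
  char key[64];
  snprintf(key, sizeof(key), "%s:%d", ip, port);
  for (SPoolConn **pp = &gPool[poolHash(key)]; *pp; pp = &(*pp)->next) {
    if (strcmp((*pp)->key, key) == 0) {
      SPoolConn *c = *pp;
      *pp = c->next;  /* unlink from the idle list */
      return c;
    }
  }
  return NULL;
}

/* Park a finished conn so the next request to the same ip:port reuses it. */
static void poolPut(SPoolConn *c) {
  unsigned b = poolHash(c->key);
  c->next = gPool[b];
  gPool[b] = c;
}

int main(void) {
  SPoolConn *c = calloc(1, sizeof(*c));
  snprintf(c->key, sizeof(c->key), "%s:%d", "127.0.0.1", 6030);
  poolPut(c);
  printf("reused: %s\n", poolGet("127.0.0.1", 6030) ? "yes" : "no");
  free(c);
  return 0;
}

Under these assumptions, parking a connection after its response is handled corresponds roughly to poolPut, and the lookup a request does before dialing corresponds to poolGet; the real code additionally guards the shared map and tracks per-connection state.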
QUEUE_INIT(&conn->q); @@ -756,8 +758,8 @@ void cliSend(SCliConn* pConn) { uv_buf_t wb = uv_buf_init((char*)pHead, msgLen); STraceId* trace = &pMsg->info.traceId; - tGTrace("%s conn %p %s is sent to %s, local info %s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pHead->msgType), - pConn->dst, pConn->src); + tGTrace("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn, + TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen); if (pHead->persist == 1) { CONN_SET_PERSIST_BY_APP(pConn); diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 4b579a1f9527bde98f9ac4d21723ed9d17e965d6..fd420203e8ac0adc4437c0b1fb987b676c43b1be 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -253,11 +253,11 @@ static void uvHandleReq(SSvrConn* pConn) { if (pConn->status == ConnNormal && pHead->noResp == 0) { transRefSrvHandle(pConn); - tGTrace("%s conn %p %s received from %s, local info:%s, msg size:%d", transLabel(pTransInst), pConn, + tGTrace("%s conn %p %s received from %s, local info:%s, len:%d", transLabel(pTransInst), pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, transMsg.contLen); } else { - tGTrace("%s conn %p %s received from %s, local info:%s, msg size:%d, resp:%d, code:%d", transLabel(pTransInst), - pConn, TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, transMsg.contLen, pHead->noResp, transMsg.code); + tGTrace("%s conn %p %s received from %s, local info:%s, len:%d, resp:%d, code:%d", transLabel(pTransInst), pConn, + TMSG_INFO(transMsg.msgType), pConn->dst, pConn->src, transMsg.contLen, pHead->noResp, transMsg.code); } // pHead->noResp = 1, @@ -296,11 +296,9 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) { if (nread > 0) { pBuf->len += nread; tTrace("%s conn %p total read:%d, current read:%d", transLabel(pTransInst), conn, pBuf->len, (int)nread); - if (transReadComplete(pBuf)) { + while (transReadComplete(pBuf)) { tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn); uvHandleReq(conn); - } else { - tTrace("%s conn %p read partial packet, continue to read", transLabel(pTransInst), conn); } return; } @@ -418,8 +416,8 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) { STrans* pTransInst = pConn->pTransInst; STraceId* trace = &pMsg->info.traceId; - tGTrace("%s conn %p %s is sent to %s, local info:%s, msglen:%d", transLabel(pTransInst), pConn, - TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, len); + tGTrace("%s conn %p %s is sent to %s, local info:%s, len:%d", transLabel(pTransInst), pConn, + TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen); pHead->msgLen = htonl(len); wb->base = msg; diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c index 787c9af31703df50a4b91589e20f5f9373d71c56..9be648b5189b09b6fae9150b6189df7aee0adbba 100644 --- a/source/libs/wal/src/walRead.c +++ b/source/libs/wal/src/walRead.c @@ -441,9 +441,12 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { return -1; } + taosThreadMutexLock(&pReader->mutex); + if (pReader->curInvalid || pReader->curVersion != ver) { if (walReadSeekVer(pReader, ver) < 0) { wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since %s", pReader->pWal->cfg.vgId, ver, terrstr()); + taosThreadMutexUnlock(&pReader->mutex); return -1; } seeked = true; @@ -464,6 +467,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; } ASSERT(0); + 
taosThreadMutexUnlock(&pReader->mutex); return -1; } } @@ -473,6 +477,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { wError("vgId:%d, unexpected wal log, index:%" PRId64 ", since head checksum not passed", pReader->pWal->cfg.vgId, ver); terrno = TSDB_CODE_WAL_FILE_CORRUPTED; + taosThreadMutexUnlock(&pReader->mutex); return -1; } @@ -480,6 +485,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { void *ptr = taosMemoryRealloc(pReader->pHead, sizeof(SWalCkHead) + pReader->pHead->head.bodyLen); if (ptr == NULL) { terrno = TSDB_CODE_WAL_OUT_OF_MEMORY; + taosThreadMutexUnlock(&pReader->mutex); return -1; } pReader->pHead = ptr; @@ -494,6 +500,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { terrno = TSDB_CODE_WAL_FILE_CORRUPTED; ASSERT(0); } + taosThreadMutexUnlock(&pReader->mutex); return -1; } @@ -503,6 +510,7 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { pReader->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; ASSERT(0); + taosThreadMutexUnlock(&pReader->mutex); return -1; } @@ -516,9 +524,12 @@ int32_t walReadVer(SWalReader *pReader, int64_t ver) { pReader->curInvalid = 1; terrno = TSDB_CODE_WAL_FILE_CORRUPTED; ASSERT(0); + taosThreadMutexUnlock(&pReader->mutex); return -1; } pReader->curVersion++; + taosThreadMutexUnlock(&pReader->mutex); + return 0; } diff --git a/source/libs/wal/src/walRef.c b/source/libs/wal/src/walRef.c index bd0f6fb1a8106d47b7c874f7e7ac3a99f9384911..2b29012040b25f8865455be78aceb63a080429d9 100644 --- a/source/libs/wal/src/walRef.c +++ b/source/libs/wal/src/walRef.c @@ -62,6 +62,11 @@ int32_t walRefVer(SWalRef *pRef, int64_t ver) { return 0; } +int32_t walPreRefVer(SWalRef *pRef, int64_t ver) { + pRef->refVer = ver; + return 0; +} + void walUnrefVer(SWalRef *pRef) { pRef->refId = -1; pRef->refFile = -1; diff --git a/source/libs/wal/src/walWrite.c b/source/libs/wal/src/walWrite.c index 4eadc92f705da24df20555ffef1bf82d7fca9858..eaf43ba7d77531cf4f0b7cc1a62e4497fd6de36e 100644 --- a/source/libs/wal/src/walWrite.c +++ b/source/libs/wal/src/walWrite.c @@ -26,7 +26,7 @@ int32_t walRestoreFromSnapshot(SWal *pWal, int64_t ver) { pIter = taosHashIterate(pWal->pRefHash, pIter); if (pIter == NULL) break; SWalRef *pRef = (SWalRef *)pIter; - if (pRef->refVer != -1) { + if (pRef->refVer != -1 && pRef->refVer <= ver) { taosHashCancelIterate(pWal->pRefHash, pIter); return -1; } @@ -116,15 +116,15 @@ int32_t walRollback(SWal *pWal, int64_t ver) { } walBuildIdxName(pWal, walGetCurFileFirstVer(pWal), fnameStr); - TdFilePtr pIdxTFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND); + TdFilePtr pIdxFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND); - if (pIdxTFile == NULL) { + if (pIdxFile == NULL) { ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); return -1; } int64_t idxOff = walGetVerIdxOffset(pWal, ver); - code = taosLSeekFile(pIdxTFile, idxOff, SEEK_SET); + code = taosLSeekFile(pIdxFile, idxOff, SEEK_SET); if (code < 0) { ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); @@ -132,7 +132,7 @@ int32_t walRollback(SWal *pWal, int64_t ver) { } // read idx file and get log file pos SWalIdxEntry entry; - if (taosReadFile(pIdxTFile, &entry, sizeof(SWalIdxEntry)) != sizeof(SWalIdxEntry)) { + if (taosReadFile(pIdxFile, &entry, sizeof(SWalIdxEntry)) != sizeof(SWalIdxEntry)) { ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); return -1; @@ -140,24 +140,24 @@ int32_t walRollback(SWal *pWal, int64_t ver) { ASSERT(entry.ver == ver); walBuildLogName(pWal, walGetCurFileFirstVer(pWal), fnameStr); - TdFilePtr 
pLogTFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND); - if (pLogTFile == NULL) { - ASSERT(0); + TdFilePtr pLogFile = taosOpenFile(fnameStr, TD_FILE_WRITE | TD_FILE_READ | TD_FILE_APPEND); + if (pLogFile == NULL) { // TODO + terrno = TAOS_SYSTEM_ERROR(errno); taosThreadMutexUnlock(&pWal->mutex); return -1; } - code = taosLSeekFile(pLogTFile, entry.offset, SEEK_SET); + code = taosLSeekFile(pLogFile, entry.offset, SEEK_SET); if (code < 0) { - ASSERT(0); // TODO + terrno = TAOS_SYSTEM_ERROR(errno); taosThreadMutexUnlock(&pWal->mutex); return -1; } // validate offset SWalCkHead head; - ASSERT(taosValidFile(pLogTFile)); - int64_t size = taosReadFile(pLogTFile, &head, sizeof(SWalCkHead)); + ASSERT(taosValidFile(pLogFile)); + int64_t size = taosReadFile(pLogFile, &head, sizeof(SWalCkHead)); if (size != sizeof(SWalCkHead)) { ASSERT(0); taosThreadMutexUnlock(&pWal->mutex); @@ -180,14 +180,14 @@ int32_t walRollback(SWal *pWal, int64_t ver) { } // truncate old files - code = taosFtruncateFile(pLogTFile, entry.offset); + code = taosFtruncateFile(pLogFile, entry.offset); if (code < 0) { ASSERT(0); terrno = TAOS_SYSTEM_ERROR(errno); taosThreadMutexUnlock(&pWal->mutex); return -1; } - code = taosFtruncateFile(pIdxTFile, idxOff); + code = taosFtruncateFile(pIdxFile, idxOff); if (code < 0) { ASSERT(0); terrno = TAOS_SYSTEM_ERROR(errno); @@ -205,8 +205,10 @@ int32_t walRollback(SWal *pWal, int64_t ver) { ASSERT(((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->fileSize == 0); ((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->firstVer = -1; } - taosCloseFile(&pIdxTFile); - taosCloseFile(&pLogTFile); + taosCloseFile(&pIdxFile); + taosCloseFile(&pLogFile); + + walSaveMeta(pWal); // unlock taosThreadMutexUnlock(&pWal->mutex); @@ -408,7 +410,7 @@ static FORCE_INLINE int32_t walWriteImpl(SWal *pWal, int64_t index, tmsg_t msgTy pWal->writeHead.head.version = index; pWal->writeHead.head.bodyLen = bodyLen; pWal->writeHead.head.msgType = msgType; - pWal->writeHead.head.ingestTs = taosGetTimestampMs(); + pWal->writeHead.head.ingestTs = 0; // sync info for sync module pWal->writeHead.head.syncMeta = syncMeta; diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 32f0fdf7b3ace44d51f4b4203e6b494f342a7ccb..26aafd743fb7ad8a89e0f73674d25eb98a884147 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -134,21 +134,95 @@ int32_t taosUcs4ToMbs(TdUcs4 *ucs4, int32_t ucs4_max_len, char *mbs) { #endif } +typedef struct { + iconv_t conv; + int8_t inUse; +} SConv; + +SConv *gConv = NULL; +int32_t convUsed = 0; +int32_t gConvMaxNum = 0; + +void taosConvInit(void) { + gConvMaxNum = 512; + gConv = taosMemoryCalloc(gConvMaxNum, sizeof(SConv)); + for (int32_t i = 0; i < gConvMaxNum; ++i) { + gConv[i].conv = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); + if ((iconv_t)-1 == gConv[i].conv || (iconv_t)0 == gConv[i].conv) { + ASSERT(0); + } + } +} + +void taosConvDestroy() { + for (int32_t i = 0; i < gConvMaxNum; ++i) { + iconv_close(gConv[i].conv); + } + taosMemoryFreeClear(gConv); + gConvMaxNum = -1; +} + +iconv_t taosAcquireConv(int32_t *idx) { + if (gConvMaxNum <= 0) { + *idx = -1; + return iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); + } + + while (true) { + int32_t used = atomic_add_fetch_32(&convUsed, 1); + if (used > gConvMaxNum) { + used = atomic_sub_fetch_32(&convUsed, 1); + sched_yield(); + continue; + } + + break; + } + + int32_t startId = taosGetSelfPthreadId() % gConvMaxNum; + while (true) { + if (gConv[startId].inUse) { + startId = (startId + 1) % 
gConvMaxNum; + continue; + } + + int8_t old = atomic_val_compare_exchange_8(&gConv[startId].inUse, 0, 1); + if (0 == old) { + break; + } + } + + *idx = startId; + return gConv[startId].conv; +} + +void taosReleaseConv(int32_t idx, iconv_t conv) { + if (idx < 0) { + iconv_close(conv); + return; + } + + atomic_store_8(&gConv[idx].inUse, 0); + atomic_sub_fetch_32(&convUsed, 1); +} + bool taosMbsToUcs4(const char *mbs, size_t mbsLength, TdUcs4 *ucs4, int32_t ucs4_max_len, int32_t *len) { #ifdef DISALLOW_NCHAR_WITHOUT_ICONV printf("Nchar cannot be read and written without iconv, please install iconv library and recompile TDengine.\n"); return -1; #else memset(ucs4, 0, ucs4_max_len); - iconv_t cd = iconv_open(DEFAULT_UNICODE_ENCODEC, tsCharset); + + int32_t idx = -1; + iconv_t conv = taosAcquireConv(&idx); size_t ucs4_input_len = mbsLength; size_t outLeft = ucs4_max_len; - if (iconv(cd, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) { - iconv_close(cd); + if (iconv(conv, (char **)&mbs, &ucs4_input_len, (char **)&ucs4, &outLeft) == -1) { + taosReleaseConv(idx, conv); return false; } - iconv_close(cd); + taosReleaseConv(idx, conv); if (len != NULL) { *len = (int32_t)(ucs4_max_len - outLeft); if (*len < 0) { diff --git a/source/util/src/terror.c b/source/util/src/terror.c index f20c5d8593f4701469be031f4a2928169f144423..51dfa1ce1372d2ba8c3424c2be22c93286a00582 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -261,6 +261,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_CONFLICT_WITH_TOPIC,"Field used by topic") TAOS_DEFINE_ERROR(TSDB_CODE_MND_SINGLE_STB_MODE_DB, "Database is single stable mode") TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_SCHEMA_VER, "Invalid schema version while alter stb") TAOS_DEFINE_ERROR(TSDB_CODE_MND_STABLE_UID_NOT_MATCH, "Invalid stable uid while alter stb") +TAOS_DEFINE_ERROR(TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA, "Field used by tsma") // mnode-trans TAOS_DEFINE_ERROR(TSDB_CODE_MND_TRANS_ALREADY_EXIST, "Transaction already exists") @@ -560,6 +561,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_ONLY_SUPPORT_SINGLE_TABLE, "Only support single TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_SMA_INDEX, "Invalid sma index") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_SELECTED_EXPR, "Invalid SELECTed expression") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_GET_META_ERROR, "Fail to get table info") +TAOS_DEFINE_ERROR(TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS, "Not unique table/alias") TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INTERNAL_ERROR, "Parser internal error") //planner diff --git a/tests/develop-test/5-taos-tools/.gitkeep b/tests/develop-test/5-taos-tools/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py new file mode 100644 index 0000000000000000000000000000000000000000..734f7da974f346c9975043290dca15f2b9a2a269 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py @@ -0,0 +1,161 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +from util.taosadapter import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + tAdapter.init("") + tAdapter.deploy() + tAdapter.start() + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json" % binPath + tdLog.info("%s" % cmd) + os.system(cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb1)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select distinct(c5) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c6) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c7) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c8) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c9) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c10) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c11) from db.stb1") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c12) from db.stb1") + tdSql.checkData(0, 0, None) + + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.`stb1-2`)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.`stb1-2`") + tdSql.checkData(0, 0, 160) + tdSql.query("select distinct(c5) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c6) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c7) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c8) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c9) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c10) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c11) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c12) from db.`stb1-2`") + tdSql.checkData(0, 0, None) + + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb2)") + tdSql.checkData(0, 0, 
8) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + tdSql.query("show databases") + tdSql.checkData(2, 14, "us") + + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.`stb2-2`)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.`stb2-2`") + tdSql.checkData(0, 0, 160) + + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_auto_create_table.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb3)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("show databases") + tdSql.checkData(2, 14, "ns") + + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.`stb3-2`)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.`stb3-2`") + tdSql.checkData(0, 0, 160) + + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_auto_create_table.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb4)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb4") + tdSql.checkData(0, 0, 160) + + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.`stb4-2`)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.`stb4-2`") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py new file mode 100644 index 0000000000000000000000000000000000000000..1d21c517e60715f88ddab265a1a380d4601edd80 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py @@ -0,0 +1,313 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import subprocess +import time + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -F 7 -H 9 -n 10 -t 2 -x -y -M -C -d newtest -l 5 -A binary,nchar\(31\) -b tinyint,binary\(23\),bool,nchar -w 29 -E -m $%%^*" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use newtest") + tdSql.query("select count(*) from newtest.meters") + tdSql.checkData(0, 0, 20) + tdSql.query("describe meters") + tdSql.checkRows(8) + tdSql.checkData(0, 1, "TIMESTAMP") + tdSql.checkData(1, 1, "TINYINT") + tdSql.checkData(2, 1, "VARCHAR") + tdSql.checkData(2, 2, 23) + tdSql.checkData(3, 1, "BOOL") + tdSql.checkData(4, 1, "NCHAR") + tdSql.checkData(4, 2, 29) + tdSql.checkData(5, 1, "INT") + tdSql.checkData(6, 1, "VARCHAR") + tdSql.checkData(6, 2, 29) + tdSql.checkData(6, 3, "TAG") + tdSql.checkData(7, 1, "NCHAR") + tdSql.checkData(7, 2, 31) + tdSql.checkData(7, 3, "TAG") + tdSql.query("select distinct(tbname) from meters where tbname like '$%^*%'") + tdSql.checkRows(2) + tdSql.execute("drop database if exists newtest") + + cmd = "%s -F 7 -n 10 -t 2 -y -M -I stmt" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select count(*) from (select distinct(tbname) from test.meters)") + tdSql.checkData(0, 0, 2) + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 20) + + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 2>&1 | grep sleep | wc -l" %binPath + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 2): + tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) + + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 2>&1 | grep sleep | wc -l" %binPath + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 3): + tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) + + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I sml 2>&1 | grep sleep | wc -l" %binPath + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 2): + tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) + + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I sml 2>&1 
| grep sleep | wc -l" %binPath + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 3): + tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) + + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -I stmt 2>&1 | grep sleep | wc -l" %binPath + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 2): + tdLog.exit("expected sleep times 2, actual %d" % int(sleepTimes)) + + cmd = "%s -n 3 -t 3 -B 2 -i 1 -G -y -T 1 -r 1 -I stmt 2>&1 | grep sleep | wc -l" %binPath + sleepTimes = subprocess.check_output(cmd, shell=True).decode("utf-8") + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + if (int(sleepTimes) != 3): + tdLog.exit("expected sleep times 3, actual %d" % int(sleepTimes)) + + cmd = "%s -S 17 -n 3 -t 1 -y -x" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select last(ts) from test.meters") + tdSql.checkData(0, 0 , "2017-07-14 10:40:00.034") + + cmd = "%s -N -I taosc -t 11 -n 11 -y -x -E" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use test") + tdSql.query("show stables") + tdSql.checkRows(0) + tdSql.query("show tables") + tdSql.checkRows(11) + tdSql.query("select count(*) from `d10`") + tdSql.checkData(0, 0, 11) + + cmd = "%s -N -I rest -t 11 -n 11 -y -x" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use test") + tdSql.query("show stables") + tdSql.checkRows(0) + tdSql.query("show tables") + tdSql.checkRows(11) + tdSql.query("select count(*) from d10") + tdSql.checkData(0, 0, 11) + + cmd = "%s -N -I stmt -t 11 -n 11 -y -x" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use test") + tdSql.query("show stables") + tdSql.checkRows(0) + tdSql.query("show tables") + tdSql.checkRows(11) + tdSql.query("select count(*) from d10") + tdSql.checkData(0, 0, 11) + + cmd = "%s -N -I sml -y" %binPath + tdLog.info("%s" % cmd) + assert(os.system("%s" % cmd) !=0 ) + + cmd = "%s -n 1 -t 1 -y -b bool" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "BOOL") + + cmd = "%s -n 1 -t 1 -y -b tinyint" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "TINYINT") + + cmd = "%s -n 1 -t 1 -y -b utinyint" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "TINYINT UNSIGNED") + + cmd = "%s -n 1 -t 1 -y -b smallint" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "SMALLINT") + + cmd = "%s -n 1 -t 1 -y -b usmallint" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "SMALLINT UNSIGNED") + + cmd = "%s -n 1 -t 1 -y -b int" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "INT") + + cmd = "%s -n 1 -t 1 -y -b uint" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "INT UNSIGNED") + + cmd = "%s -n 
1 -t 1 -y -b bigint" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "BIGINT") + + cmd = "%s -n 1 -t 1 -y -b ubigint" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "BIGINT UNSIGNED") + + cmd = "%s -n 1 -t 1 -y -b timestamp" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "TIMESTAMP") + + cmd = "%s -n 1 -t 1 -y -b float" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "FLOAT") + + cmd = "%s -n 1 -t 1 -y -b double" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "DOUBLE") + + cmd = "%s -n 1 -t 1 -y -b nchar" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "NCHAR") + + cmd = "%s -n 1 -t 1 -y -b nchar\(7\)" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "NCHAR") + + cmd = "%s -n 1 -t 1 -y -b binary" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "VARCHAR") + + cmd = "%s -n 1 -t 1 -y -b binary\(7\)" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(1, 1, "VARCHAR") + + cmd = "%s -n 1 -t 1 -y -A json\(7\)" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe test.meters") + tdSql.checkData(4, 1, "JSON") + + cmd = "%s -n 1 -t 1 -y -b int,x" %binPath + tdLog.info("%s" % cmd) + assert(os.system("%s" % cmd) != 0) + + cmd = "%s -n 1 -t 1 -y -A int,json" %binPath + tdLog.info("%s" % cmd) + assert(os.system("%s" % cmd) != 0) + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_tags.csv b/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_tags.csv new file mode 100644 index 0000000000000000000000000000000000000000..8e2afd342773582f9484b796cdc0b84736e8194e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_tags.csv @@ -0,0 +1 @@ +17 \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_use_ts.csv b/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_use_ts.csv new file mode 100644 index 0000000000000000000000000000000000000000..f92eedd50d35e1666d8d74a999fd968271944a57 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/csv/sample_use_ts.csv @@ -0,0 +1,3 @@ +1641976781445,1 +1641976781446,2 +1641976781447,3 \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py b/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..9104d6309611916cbd88a1d61a1446bf54a25859 --- 
/dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py @@ -0,0 +1,88 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-13928] taosBenchmark improve user interface + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/custom_col_tag.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb") + tdSql.checkData(0, 0, "ts") + tdSql.checkData(1, 0, "first_type") + tdSql.checkData(2, 0, "second_type") + tdSql.checkData(3, 0, "second_type_1") + tdSql.checkData(4, 0, "second_type_2") + tdSql.checkData(5, 0, "second_type_3") + tdSql.checkData(6, 0, "second_type_4") + tdSql.checkData(7, 0, "third_type") + tdSql.checkData(8, 0, "forth_type") + tdSql.checkData(9, 0, "forth_type_1") + tdSql.checkData(10, 0, "forth_type_2") + tdSql.checkData(11, 0, "single") + tdSql.checkData(12, 0, "multiple") + tdSql.checkData(13, 0, "multiple_1") + tdSql.checkData(14, 0, "multiple_2") + tdSql.checkData(15, 0, "multiple_3") + tdSql.checkData(16, 0, "multiple_4") + tdSql.checkData(17, 0, "thensingle") + tdSql.checkData(18, 0, "thenmultiple") + tdSql.checkData(19, 0, "thenmultiple_1") + tdSql.checkData(20, 0, "thenmultiple_2") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py new file mode 100644 index 0000000000000000000000000000000000000000..18b22b51cefdc831c4f459570334fd4ec54c700a --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py @@ -0,0 +1,70 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/default.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb)") + tdSql.checkData(0, 0, 10) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 100) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py new file mode 100644 index 0000000000000000000000000000000000000000..99e8cd36a4cf1dae08baf93ef4d6338bb08dc7bd --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py @@ -0,0 +1,96 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import subprocess +import time + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-13823] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -n 100 -t 100 -y" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("use test") + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, 10000) + + tdSql.query("describe meters") + tdSql.checkRows(6) + tdSql.checkData(0, 1, "TIMESTAMP") + tdSql.checkData(0, 0, "ts") + tdSql.checkData(1, 0, "current") + tdSql.checkData(1, 1, "FLOAT") + tdSql.checkData(2, 0, "voltage") + tdSql.checkData(2, 1, "INT") + tdSql.checkData(3, 0, "phase") + tdSql.checkData(3, 1, "FLOAT") + tdSql.checkData(4, 0, "groupid") + tdSql.checkData(4, 1, "INT") + tdSql.checkData(4, 3, "TAG") + tdSql.checkData(5, 0, "location") + tdSql.checkData(5, 1, "VARCHAR") + tdSql.checkData(5, 2, 16) + tdSql.checkData(5, 3, "TAG") + + tdSql.query("select count(*) from test.meters where groupid >= 0") + tdSql.checkData(0, 0, 10000) + + tdSql.query("select count(*) from test.meters where location = 'San Francisco' or location = 'Los Angles' or location = 'San Diego' or location = 'San Jose' or \ + location = 'Palo Alto' or location = 'Campbell' or location = 'Mountain View' or location = 'Sunnyvale' or location = 'Santa Clara' or location = 'Cupertino' ") + tdSql.checkData(0, 0, 10000) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py new file mode 100644 index 0000000000000000000000000000000000000000..38332f7b64003c2595910fc7632c2a88fc3d9747 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py @@ -0,0 +1,330 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +from util.taosadapter import * + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + tAdapter.init("") + tAdapter.deploy() + tAdapter.start() + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 160) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb") + tdSql.checkRows(29) + tdSql.checkData(0, 1, "TIMESTAMP") + tdSql.checkData(1, 1, "TIMESTAMP") + tdSql.checkData(2, 1, "INT") + tdSql.checkData(3, 1, "BIGINT") + tdSql.checkData(4, 1, "FLOAT") + tdSql.checkData(5, 1, "DOUBLE") + tdSql.checkData(6, 1, "SMALLINT") + tdSql.checkData(7, 1, "TINYINT") + tdSql.checkData(8, 1, "BOOL") + tdSql.checkData(9, 1, "NCHAR") + tdSql.checkData(9, 2, 29) + tdSql.checkData(10, 1, "INT UNSIGNED") + tdSql.checkData(11, 1, "BIGINT UNSIGNED") + tdSql.checkData(12, 1, "TINYINT UNSIGNED") + tdSql.checkData(13, 1, "SMALLINT UNSIGNED") + tdSql.checkData(14, 1, "VARCHAR") + tdSql.checkData(14, 2, 23) + tdSql.checkData(15, 1, "TIMESTAMP") + tdSql.checkData(16, 1, "INT") + tdSql.checkData(17, 1, "BIGINT") + tdSql.checkData(18, 1, "FLOAT") + tdSql.checkData(19, 1, "DOUBLE") + tdSql.checkData(20, 1, "SMALLINT") + tdSql.checkData(21, 1, "TINYINT") + tdSql.checkData(22, 1, "BOOL") + tdSql.checkData(23, 1, "NCHAR") + tdSql.checkData(23, 2, 17) + tdSql.checkData(24, 1, "INT UNSIGNED") + tdSql.checkData(25, 1, "BIGINT UNSIGNED") + tdSql.checkData(26, 1, "TINYINT UNSIGNED") + tdSql.checkData(27, 1, "SMALLINT UNSIGNED") + tdSql.checkData(28, 1, "VARCHAR") + tdSql.checkData(28, 2, 19) + tdSql.query("select count(*) from db.stb where c1 >= 0 and c1 <= 10") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb where c2 >= 0 and c2 <= 10") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb where c3 >= 0 and c3 <= 10") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb where c4 >= 0 and c4 <= 10") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb where c5 >= 0 and c5 <= 10") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb where c6 >= 0 and c6 <= 10") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb where c8 = 'd1' 
or c8 = 'd2'")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c9 >= 0 and c9 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c10 >= 0 and c10 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c11 >= 0 and c11 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c12 >= 0 and c12 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c13 = 'b1' or c13 = 'b2'")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t1 >= 0 and t1 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t2 >= 0 and t2 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t3 >= 0 and t3 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t4 >= 0 and t4 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t5 >= 0 and t5 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t6 >= 0 and t6 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t8 = 'd1' or t8 = 'd2'")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t9 >= 0 and t9 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t10 >= 0 and t10 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t11 >= 0 and t11 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t12 >= 0 and t12 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t13 = 'b1' or t13 = 'b2'")
+        tdSql.checkData(0, 0, 160)
+
+
+        cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json" %binPath
+        tdLog.info("%s" % cmd)
+        os.system("%s" % cmd)
+        tdSql.query("select count(*) from db.stb")
+        tdSql.checkData(0, 0, 160)
+        tdSql.execute("reset query cache")
+        tdSql.query("describe db.stb")
+        tdSql.checkRows(27)
+        tdSql.checkData(0, 1, "TIMESTAMP")
+        tdSql.checkData(1, 1, "INT")
+        tdSql.checkData(2, 1, "BIGINT")
+        tdSql.checkData(3, 1, "FLOAT")
+        tdSql.checkData(4, 1, "DOUBLE")
+        tdSql.checkData(5, 1, "SMALLINT")
+        tdSql.checkData(6, 1, "TINYINT")
+        tdSql.checkData(7, 1, "BOOL")
+        tdSql.checkData(8, 1, "NCHAR")
+        tdSql.checkData(8, 2, 32)
+        tdSql.checkData(9, 1, "INT UNSIGNED")
+        tdSql.checkData(10, 1, "BIGINT UNSIGNED")
+        tdSql.checkData(11, 1, "TINYINT UNSIGNED")
+        tdSql.checkData(12, 1, "SMALLINT UNSIGNED")
+        tdSql.checkData(13, 1, "VARCHAR")
+        tdSql.checkData(13, 2, 32)
+        tdSql.checkData(14, 1, "NCHAR")
+        tdSql.checkData(15, 1, "NCHAR")
+        tdSql.checkData(16, 1, "NCHAR")
+        tdSql.checkData(17, 1, "NCHAR")
+        tdSql.checkData(18, 1, "NCHAR")
+        tdSql.checkData(19, 1, "NCHAR")
+        tdSql.checkData(20, 1, "NCHAR")
+        tdSql.checkData(21, 1, "NCHAR")
+        tdSql.checkData(22, 1, "NCHAR")
+        tdSql.checkData(23, 1, "NCHAR")
+        tdSql.checkData(24, 1, "NCHAR")
+        tdSql.checkData(25, 1, "NCHAR")
+        tdSql.checkData(26, 1, "NCHAR")
+
+        cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json" %binPath
+        tdLog.info("%s" % cmd)
+        os.system("%s" % cmd)
+        tdSql.query("select count(*) from db.stb")
+        tdSql.checkData(0, 0, 160)
+        tdSql.execute("reset query cache")
+        tdSql.query("describe db.stb")
+        tdSql.checkRows(29)
+        tdSql.checkData(0, 1, "TIMESTAMP")
+        tdSql.checkData(1, 1, "TIMESTAMP")
+        tdSql.checkData(2, 1, "INT")
+        tdSql.checkData(3, 1, "BIGINT")
+        tdSql.checkData(4, 1, "FLOAT")
+        tdSql.checkData(5, 1, "DOUBLE")
+        tdSql.checkData(6, 1, "SMALLINT")
+        tdSql.checkData(7, 1, "TINYINT")
+        tdSql.checkData(8, 1, "BOOL")
+        tdSql.checkData(9, 1, "NCHAR")
+        tdSql.checkData(9, 2, 29)
+        tdSql.checkData(10, 1, "INT UNSIGNED")
+        tdSql.checkData(11, 1, "BIGINT UNSIGNED")
+        tdSql.checkData(12, 1, "TINYINT UNSIGNED")
+        tdSql.checkData(13, 1, "SMALLINT UNSIGNED")
+        tdSql.checkData(14, 1, "VARCHAR")
+        tdSql.checkData(14, 2, 23)
+        tdSql.checkData(15, 1, "TIMESTAMP")
+        tdSql.checkData(16, 1, "INT")
+        tdSql.checkData(17, 1, "BIGINT")
+        tdSql.checkData(18, 1, "FLOAT")
+        tdSql.checkData(19, 1, "DOUBLE")
+        tdSql.checkData(20, 1, "SMALLINT")
+        tdSql.checkData(21, 1, "TINYINT")
+        tdSql.checkData(22, 1, "BOOL")
+        tdSql.checkData(23, 1, "NCHAR")
+        tdSql.checkData(23, 2, 17)
+        tdSql.checkData(24, 1, "INT UNSIGNED")
+        tdSql.checkData(25, 1, "BIGINT UNSIGNED")
+        tdSql.checkData(26, 1, "TINYINT UNSIGNED")
+        tdSql.checkData(27, 1, "SMALLINT UNSIGNED")
+        tdSql.checkData(28, 1, "VARCHAR")
+        tdSql.checkData(28, 2, 19)
+
+        cmd = "%s -f ./5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json" %binPath
+        tdLog.info("%s" % cmd)
+        os.system("%s" % cmd)
+        tdSql.query("select count(*) from db.stb")
+        tdSql.checkData(0, 0, 160)
+        tdSql.execute("reset query cache")
+        tdSql.query("describe db.stb")
+        tdSql.checkRows(29)
+        tdSql.checkData(0, 1, "TIMESTAMP")
+        tdSql.checkData(1, 1, "TIMESTAMP")
+        tdSql.checkData(2, 1, "INT")
+        tdSql.checkData(3, 1, "BIGINT")
+        tdSql.checkData(4, 1, "FLOAT")
+        tdSql.checkData(5, 1, "DOUBLE")
+        tdSql.checkData(6, 1, "SMALLINT")
+        tdSql.checkData(7, 1, "TINYINT")
+        tdSql.checkData(8, 1, "BOOL")
+        tdSql.checkData(9, 1, "NCHAR")
+        tdSql.checkData(9, 2, 29)
+        tdSql.checkData(10, 1, "INT UNSIGNED")
+        tdSql.checkData(11, 1, "BIGINT UNSIGNED")
+        tdSql.checkData(12, 1, "TINYINT UNSIGNED")
+        tdSql.checkData(13, 1, "SMALLINT UNSIGNED")
+        tdSql.checkData(14, 1, "VARCHAR")
+        tdSql.checkData(14, 2, 23)
+        tdSql.checkData(15, 1, "TIMESTAMP")
+        tdSql.checkData(16, 1, "INT")
+        tdSql.checkData(17, 1, "BIGINT")
+        tdSql.checkData(18, 1, "FLOAT")
+        tdSql.checkData(19, 1, "DOUBLE")
+        tdSql.checkData(20, 1, "SMALLINT")
+        tdSql.checkData(21, 1, "TINYINT")
+        tdSql.checkData(22, 1, "BOOL")
+        tdSql.checkData(23, 1, "NCHAR")
+        tdSql.checkData(23, 2, 17)
+        tdSql.checkData(24, 1, "INT UNSIGNED")
+        tdSql.checkData(25, 1, "BIGINT UNSIGNED")
+        tdSql.checkData(26, 1, "TINYINT UNSIGNED")
+        tdSql.checkData(27, 1, "SMALLINT UNSIGNED")
+        tdSql.checkData(28, 1, "VARCHAR")
+        tdSql.checkData(28, 2, 19)
+        tdSql.query("select count(*) from db.stb where c0 >= 0 and c0 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c1 >= 0 and c1 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c2 >= 0 and c2 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c3 >= 0 and c3 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c4 >= 0 and c4 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c5 >= 0 and c5 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c6 >= 0 and c6 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c8 like 'd1%' or c8 like 'd2%'")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c9 >= 0 and c9 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c10 >= 0 and c10 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c11 >= 0 and c11 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c12 >= 0 and c12 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where c13 like 'b1%' or c13 like 'b2%'")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t0 >= 0 and t0 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t1 >= 0 and t1 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t2 >= 0 and t2 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t3 >= 0 and t3 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t4 >= 0 and t4 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t5 >= 0 and t5 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t6 >= 0 and t6 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t8 like 'd1%' or t8 like 'd2%'")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t9 >= 0 and t9 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t10 >= 0 and t10 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t11 >= 0 and t11 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t12 >= 0 and t12 <= 10")
+        tdSql.checkData(0, 0, 160)
+        tdSql.query("select count(*) from db.stb where t13 like 'b1%' or t13 like 'b2%'")
+        tdSql.checkData(0, 0, 160)
+
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebddff56436c53ca1f15b714e58204aaeeee6c89
--- /dev/null
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py
@@ -0,0 +1,91 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+
+
+class TDTestCase:
+    def caseDescription(self):
+        '''
+        [TD-11510] taosBenchmark test cases: invalid command line options either abort with a non-zero exit code or fall back to usable defaults
+        '''
+        return
+
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+
+    def getPath(self, tool="taosBenchmark"):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        paths = []
+        for root, dirs, files in os.walk(projPath):
+            if ((tool) in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    paths.append(os.path.join(root, tool))
+                    break
+        if (len(paths) == 0):
+            tdLog.exit("taosBenchmark not found!")
+            return
+        else:
+            tdLog.info("taosBenchmark found in %s" % paths[0])
+            return paths[0]
+
+    def run(self):
+        binPath = self.getPath()
+        cmd = "%s -F abc -P abc -I abc -T abc -H abc -i abc -S abc -B abc -r abc -t abc -n abc -l abc -w abc -w 16385 -R abc -O abc -a abc -n 2 -t 2 -r 1 -y" %binPath
+        tdLog.info("%s" % cmd)
+        os.system("%s" % cmd)
+        tdSql.query("select count(*) from test.meters")
+        tdSql.checkData(0, 0, 4)
+
+        cmd = "%s non_exist_opt" %binPath
+        tdLog.info("%s" % cmd)
+        assert (os.system("%s" % cmd) != 0)
+
+        cmd = "%s -f non_exist_file -y" %binPath
+        tdLog.info("%s" % cmd)
+        assert (os.system("%s" % cmd) != 0)
+
+        cmd = "%s -h non_exist_host -y" %binPath
+        tdLog.info("%s" % cmd)
+        assert (os.system("%s" % cmd) != 0)
+
+        cmd = "%s -p non_exist_pass -y" %binPath
+        tdLog.info("%s" % cmd)
+        assert (os.system("%s" % cmd) != 0)
+
+        cmd = "%s -u non_exist_user -y" %binPath
+        tdLog.info("%s" % cmd)
+        assert (os.system("%s" % cmd) != 0)
+
+        cmd = "%s -c non_exist_dir -n 1 -t 1 -o non_exist_path -y" %binPath
+        tdLog.info("%s" % cmd)
+        assert (os.system("%s" % cmd) == 0)
+
+    def stop(self):
+        tdSql.close()
+        tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/custom_col_tag.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/custom_col_tag.json new file mode 100644 index 0000000000000000000000000000000000000000..fec5775cd603db99dc79b834db37554839bd4292 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/custom_col_tag.json @@ -0,0 +1,83 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10,
+ "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": -10, + "childtable_offset": 10, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "partial_col_num": 0, + "columns": [{ + "type": "INT", + "name": "first_type" + }, { + "type": "UINT", + "name": "second_type", + "count": 5 + },{ + "type": "double", + "name": "third_type" + },{ + "type": "float", + "name": "forth_type", + "count": 3 + }], + "tags": [{ + "type": "INT", + "name": "single" + }, { + "type": "UINT", + "name": "multiple", + "count": 5 + },{ + "type": "double", + "name": "thensingle" + },{ + "type": "float", + "name": "thenmultiple", + "count": 3 + }] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/default.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/default.json new file mode 100644 index 0000000000000000000000000000000000000000..d4b2aae2fb7bf1a3612e60f395a1c5ed26ceca8f --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/default.json @@ -0,0 +1,27 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db" + }, + "super_tables": [{ + "name": "stb", + "childtable_prefix": "stb_", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}] + }] + }] +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..868ff99842d9f1015e24a17aa91fc45700f036f3 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_auto_create_table.json @@ -0,0 +1,79 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 100, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ns", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, 
{"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb3-2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb3-2_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..39dd1d185e7b996917be194275da0e7a807f8274 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_insert_alltypes.json @@ -0,0 +1,54 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ns", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", 
"len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json new file mode 100644 index 0000000000000000000000000000000000000000..459e496f0ba5f1b0b90f755969a1213a7f42fc6a --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/rest_query.json @@ -0,0 +1,27 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_mode": "rest", + "connection_pool_size": 10, + "response_buffer": 10000, + "specified_table_query": + { + "query_times": 1, + "sqls": + [{ + "sql": "select count(*) from db.stb", + "result": "rest_query_specified" + }] + }, + "super_table_query": { + "stblname": "stb", + "sqls": [ + { + "sql": "select count(*) from xxxx", + "result": "rest_query_super" + } + ] + } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..1bbeca672b42dfe6b5b48c726fa5b11408ffcaf2 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_auto_create_table.json @@ -0,0 +1,79 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 100, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb4-2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb4-2_", + "escape_character": 
"yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..06617dc2bfb2b9dbc765ee9747fce02f32f31b57 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_insert_alltypes.json @@ -0,0 +1,54 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json new file mode 100644 index 0000000000000000000000000000000000000000..9e6f17f2cf6e62902248fb779e76dbf6142b5a31 --- /dev/null +++ 
b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_interlace.json @@ -0,0 +1,79 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 30, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 60, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..5a373888a6cdfe2ee8a4880863459e91c87a8fa2 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_json_alltypes.json @@ -0,0 +1,254 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + 
"line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BOOL"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TINYINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb3_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "SMALLINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb4_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb5_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + 
"use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BIGINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb6", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb6_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "FLOAT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb7", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb7_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb8", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb8_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 8}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb9", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb9_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "NCHAR", "len": 8}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 
17, "count":1}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json new file mode 100644 index 0000000000000000000000000000000000000000..84acbcf33d728b5c66ab322ef2ccc2b6bb40a75e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_json.json @@ -0,0 +1,98 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [ + { + "dbinfo": { + "name": "db3", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096 + }, + "super_tables": [ + { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + } + ], + "tags": [ + { + "type": "INT" + } + ] + },{ + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "json", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 3, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + } + ], + "tags": [ + { + "type": "INT" + } + ] + } + ] + } + ] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json new file mode 100644 index 0000000000000000000000000000000000000000..fe1b07821d7d8331c8dc7dd197bac18076df0898 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_line.json @@ -0,0 +1,258 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [ + { + "dbinfo": { + "name": "db2", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096 + }, + "super_tables": [ + { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 
20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ], + "tags": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ] + },{ + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 6, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ], + "tags": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ] + } + ] + } + ] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json new file mode 100644 index 0000000000000000000000000000000000000000..f36412c305aa1ef5db6e0a9430d3dc847e807419 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_rest_telnet.json @@ -0,0 +1,178 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096 + }, + "super_tables": [ + { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 8, + 
"childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + } + ], + "tags": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ] + },{ + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [ + { + "type": "INT" + } + ], + "tags": [ + { + "type": "INT" + }, + { + "type": "BIGINT" + }, + { + "type": "FLOAT" + }, + { + "type": "DOUBLE" + }, + { + "type": "SMALLINT" + }, + { + "type": "TINYINT" + }, + { + "type": "BOOL" + }, + { + "type": "NCHAR", + "len": 17, + "count": 1 + }, + { + "type": "UINT" + }, + { + "type": "UBIGINT" + }, + { + "type": "UTINYINT" + }, + { + "type": "USMALLINT" + }, + { + "type": "BINARY", + "len": 19, + "count": 1 + } + ] + } + ] + } + ] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..545d85cf1b0e7b02b160ae72ec29494c5854cc0b --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json @@ -0,0 +1,354 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BOOL"}], + 
"tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TINYINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb3", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb3_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "UTINYINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb4", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb4_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "SMALLINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb5", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb5_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": 
"", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "USMALLINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb6", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb6_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb7", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb7_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "UINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb8", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb8_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BIGINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb9", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb9_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + 
"disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "UBIGINT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb10", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb10_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "FLOAT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb11", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb11_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "DOUBLE"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb12", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb12_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "BINARY", "len": 8}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb13", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb13_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml", + "line_protocol": "telnet", + "childtable_limit": 0, + "childtable_offset": 
0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "NCHAR", "len": 8}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json new file mode 100644 index 0000000000000000000000000000000000000000..29cd6770381dd40683763f9f485b7345d6da6070 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json @@ -0,0 +1,82 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "telnet_tcp_port": 6046, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "telnet", + "tcp_transfer": "yes", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "sml-rest", + "line_protocol": "telnet", + "tcp_transfer": "yes", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/specified_subscribe.json 
b/tests/develop-test/5-taos-tools/taosbenchmark/json/specified_subscribe.json new file mode 100644 index 0000000000000000000000000000000000000000..4f730a9cbfb604a80fcbbc19e8a14e814a9c5f6a --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/specified_subscribe.json @@ -0,0 +1,24 @@ +{ + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "specified_table_query": { + "threads": 1, + "mode": "async", + "interval": 1000, + "restart": "no", + "keepProgress": "yes", + "resubAfterConsume": 10, + "endAfterConsume": 1, + "sqls": [ + { + "sql": "select * from stb;" + } + ] + } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..e6e773f6167a42f19074a412527f8ccad5689e9e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_auto_create_table.json @@ -0,0 +1,79 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 100, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "us", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb2-2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb2-2_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": 
"INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..2e53cdcb1d95da75db8a1a8afff2ddf6d72cc4bd --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/stmt_insert_alltypes.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "stmt", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "partial_col_num": 999, + "columns": [{"type": "TIMESTAMP","max": 10, "min": 0},{"type": "INT","max": 10, "min": 0}, {"type": "BIGINT","max": 10, "min": 0}, {"type": "FLOAT","max": 10, "min": 0}, {"type": "DOUBLE","max": 10, "min": 0}, {"type": "SMALLINT","max": 10, "min": 0}, {"type": "TINYINT","max": 10, "min": 0}, {"type": "BOOL","max": 10, "min": 0}, {"type": "NCHAR","len": 29, "count":1, + "values": ["d1", "d2"] + }, {"type": "UINT","max": 10, "min": 0}, {"type": "UBIGINT","max": 10, "min": 0}, {"type": "UTINYINT","max": 10, "min": 0}, {"type": "USMALLINT","max": 10, "min": 0}, {"type": "BINARY", "len": 23, "count":1, + "values": ["b1","b2"] + }], + "tags": [{"type": "TIMESTAMP","max": 10, "min": 0},{"type": "INT","max": 10, "min": 0}, {"type": "BIGINT","max": 10, "min": 0}, {"type": "FLOAT","max": 10, "min": 0}, {"type": "DOUBLE","max": 10, "min": 0}, {"type": "SMALLINT","max": 10, "min": 0}, {"type": "TINYINT","max": 10, "min": 0}, {"type": "BOOL","max": 10, "min": 0}, {"type": "NCHAR","len": 17, "count":1, + "values": ["d1", "d2"] + }, {"type": "UINT","max": 10, "min": 0}, {"type": "UBIGINT","max": 10, "min": 0}, {"type": "UTINYINT","max": 10, "min": 0}, {"type": "USMALLINT","max": 10, "min": 0}, {"type": "BINARY", "len": 19, "count":1, + "values": ["b1","b2"] + }] + }] + }] +} diff --git 
a/tests/develop-test/5-taos-tools/taosbenchmark/json/super_subscribe.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/super_subscribe.json new file mode 100644 index 0000000000000000000000000000000000000000..6284caf8b26b85bc379df16a7f3914fa9a8a9297 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/super_subscribe.json @@ -0,0 +1,24 @@ +{ + "filetype": "subscribe", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "databases": "db", + "confirm_parameter_prompt": "no", + "super_table_query": { + "stblname": "stb", + "threads": 1, + "mode": "sync", + "interval": 1000, + "restart": "yes", + "keepProgress": "yes", + "endAfterConsume": 1, + "sqls": [ + { + "sql": "select * from xxxx;" + } + ] + } +} \ No newline at end of file diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..f683cc016b1e068c6ebf2933084977ffeb11966c --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_auto_create_table.json @@ -0,0 +1,81 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 100, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb1", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "partial_col_num": 5, + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + },{ + "name": "stb1-2", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb1-2_", + "escape_character": "yes", + "auto_create_table": "yes", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 5, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 
00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "partial_col_num": 5, + "columns": [{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json new file mode 100644 index 0000000000000000000000000000000000000000..5694b584075d8acfe305744f8ac471c6baff177e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_insert_alltypes.json @@ -0,0 +1,63 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": -10, + "childtable_offset": 10, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "partial_col_num": 999, + "columns": [{"type": "TIMESTAMP","max": 10, "min": 0},{"type": "INT","max": 10, "min": 0}, {"type": "BIGINT","max": 10, "min": 0}, {"type": "FLOAT","max": 10, "min": 0}, {"type": "DOUBLE","max": 10, "min": 0}, {"type": "SMALLINT","max": 10, "min": 0}, {"type": "TINYINT","max": 10, "min": 0}, {"type": "BOOL","max": 10, "min": 0}, {"type": "NCHAR","len": 29, "count":1, + "values": ["d1", "d2"] + }, {"type": "UINT","max": 10, "min": 0}, {"type": "UBIGINT","max": 10, "min": 0}, {"type": "UTINYINT","max": 10, "min": 0}, {"type": "USMALLINT","max": 10, "min": 0}, {"type": "BINARY", "len": 23, "count":1, + "values": ["b1","b2"] + }], + "tags": [{"type": "TIMESTAMP","max": 10, "min": 0},{"type": "INT","max": 10, "min": 0}, {"type": "BIGINT","max": 10, "min": 0}, {"type": "FLOAT","max": 10, "min": 0}, {"type": "DOUBLE","max": 10, "min": 0}, {"type": "SMALLINT","max": 10, "min": 0}, {"type": "TINYINT","max": 10, "min": 0}, {"type": "BOOL","max": 10, "min": 0}, {"type": "NCHAR","len": 17, "count":1, + "values": ["d1", "d2"] + }, {"type": "UINT","max": 10, "min": 0}, {"type": "UBIGINT","max": 10, "min": 0}, {"type": "UTINYINT","max": 10, "min": 0}, {"type": "USMALLINT","max": 10, "min": 0}, {"type": 
"BINARY", "len": 19, "count":1, + "values": ["b1","b2"] + }] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json new file mode 100644 index 0000000000000000000000000000000000000000..168ad47fcf6b9464bd9173dd37bb38261a1e7f48 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_json_tag.json @@ -0,0 +1,54 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "INT"}], + "tags": [{"type": "JSON", "len": 8, "count": 5}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json new file mode 100644 index 0000000000000000000000000000000000000000..4f742212a4813d953cd734c8a784deb315be0905 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_limit_offset.json @@ -0,0 +1,55 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "no", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"yes", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 2, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "partial_col_num": 3, + "columns": [{"type": "TIMESTAMP"},{"type": "INT", "len": 0}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 29, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", 
"len": 23, "count":1}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR","len": 17, "count":1}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY", "len": 19, "count":1}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json new file mode 100644 index 0000000000000000000000000000000000000000..ea6f982aaeec2cb0b15156acab8dc087e6e8edc4 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_only_create_table.json @@ -0,0 +1,54 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "no", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "yes", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "rand", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 0, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_file": "./sample.csv", + "use_sample_ts": "no", + "tags_file": "", + "columns": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR"}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY"}], + "tags": [{"type": "TIMESTAMP"},{"type": "INT"}, {"type": "BIGINT"}, {"type": "FLOAT"}, {"type": "DOUBLE"}, {"type": "SMALLINT"}, {"type": "TINYINT"}, {"type": "BOOL"}, {"type": "NCHAR"}, {"type": "UINT"}, {"type": "UBIGINT"}, {"type": "UTINYINT"}, {"type": "USMALLINT"}, {"type": "BINARY"}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json new file mode 100644 index 0000000000000000000000000000000000000000..c8ff2e92759109a3e01b0f3d610f1de2b830b49d --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_query.json @@ -0,0 +1,33 @@ +{ + "filetype":"query", + "cfgdir": "/etc/taos", + "host": "localhost", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "db", + "query_times": 1, + "reset_query_cache": "yes", + "specified_table_query": + { + "query_interval": 1, + "concurrent":1, + "sqls": + [{ + "sql": "select count(*) from db.stb", + "result": "taosc_query_specified" + }] + }, + "super_table_query": { + "stblname": "stb", + "query_interval": 1, + "concurrent": 1, + "sqls": [ + { + "sql": "select count(*) from xxxx", + "result": "taosc_query_super" + } + ] + } +} \ No newline at end of file diff --git 
a/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json new file mode 100644 index 0000000000000000000000000000000000000000..38aa47740f9381f5b991fb9b70f859ced3776601 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json @@ -0,0 +1,54 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "connection_pool_size": 20, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "prepared_rand": 10, + "chinese": "yes", + "insert_interval": 0, + "num_of_records_per_req": 10, + "databases": [{ + "dbinfo": { + "name": "db", + "drop": "yes", + "replica": 1, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp":2 + }, + "super_tables": [{ + "name": "stb", + "child_table_exists":"no", + "childtable_count": 8, + "childtable_prefix": "stb_", + "escape_character": "no", + "auto_create_table": "no", + "batch_create_tbl_num": 10, + "data_source": "sample", + "insert_mode": "taosc", + "line_protocol": "line", + "childtable_limit": 0, + "childtable_offset": 0, + "insert_rows": 20, + "insert_interval": 0, + "interlace_rows": 0, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "now", + "sample_file": "./5-taos-tools/taosbenchmark/csv/sample_use_ts.csv", + "use_sample_ts": "yes", + "tags_file": "./5-taos-tools/taosbenchmark/csv/sample_tags.csv", + "columns": [{"type": "INT"}], + "tags": [{"type": "INT"}] + }] + }] +} diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py b/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py new file mode 100644 index 0000000000000000000000000000000000000000..5f25e0229b1033a13c12b82b06669d43a156cf8e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py @@ -0,0 +1,73 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_json_tag.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb") + tdSql.checkData(2, 0, "jtag") + tdSql.checkData(2, 1, "JSON") + tdSql.checkData(2, 3, "TAG") + # 3.0 cannot distinct jtag + #tdSql.query("select count(jtag) from db.stb") + #tdSql.checkData(0, 0, 8) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py new file mode 100644 index 0000000000000000000000000000000000000000..47a60e475725123f877b055d699ab217bbb77236 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py @@ -0,0 +1,111 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_only_create_table.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb") + tdSql.checkRows(0) + tdSql.query("describe db.stb") + tdSql.checkData(9, 1, "NCHAR") + tdSql.checkData(14, 1, "VARCHAR") + tdSql.checkData(23, 1, "NCHAR") + tdSql.checkData(28, 1, "VARCHAR") + tdSql.checkData(9, 2, 64) + tdSql.checkData(14, 2, 64) + tdSql.checkData(23, 2, 64) + tdSql.checkData(28, 2, 64) + + + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_limit_offset.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 40) + tdSql.query("select distinct(c3) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c4) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c5) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c6) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c7) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c8) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c9) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c10) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c11) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c12) from db.stb") + tdSql.checkData(0, 0, None) + tdSql.query("select distinct(c13) from db.stb") + tdSql.checkData(0, 0, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py new file mode 100644 index 
0000000000000000000000000000000000000000..84d9433967abb07baecb5da0870d97ff9c066f20 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py @@ -0,0 +1,127 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import ast +import os +import re +import subprocess + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +from util.taosadapter import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + tAdapter.init("") + tAdapter.deploy() + tAdapter.start() + binPath = self.getPath() + os.system("rm -f rest_query_specified-0 rest_query_super-0 taosc_query_specified-0 taosc_query_super-0") + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db") + tdSql.execute("use db") + tdSql.execute("create table stb (ts timestamp, c0 int) tags (t0 int)") + tdSql.execute("insert into stb_0 using stb tags (0) values (now, 0)") + tdSql.execute("insert into stb_1 using stb tags (1) values (now, 1)") + tdSql.execute("insert into stb_2 using stb tags (2) values (now, 2)") + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_query.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + with open("%s" % "taosc_query_specified-0", 'r+') as f1: + for line in f1.readlines(): + queryTaosc = line.strip().split()[0] + assert queryTaosc == '3' , "result is %s != expect: 3" % queryTaosc + + with open("%s" % "taosc_query_super-0", 'r+') as f1: + for line in f1.readlines(): + queryTaosc = line.strip().split()[0] + assert queryTaosc == '1', "result is %s != expect: 1" % queryTaosc + + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/rest_query.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + + times = 0 + with open("rest_query_super-0", 'r+') as f1: + + for line in f1.readlines(): + contents = line.strip() + if contents.find("data") != -1: + pattern = re.compile("{.*}") + contents = pattern.search(contents).group() + contentsDict = ast.literal_eval(contents) + queryResultRest = contentsDict['data'][0][0] + assert queryResultRest == 1, "result is %s != expect: 1" % queryResultRest + times += 1 + + assert times == 3, "result is %s != expect: 3" % times + + + times = 0 + with open("rest_query_specified-0", 'r+') as f1: + for line in 
f1.readlines(): + contents = line.strip() + if contents.find("data") != -1: + pattern = re.compile("{.*}") + contents = pattern.search(contents).group() + contentsDict = ast.literal_eval(contents) + queryResultRest = contentsDict['data'][0][0] + assert queryResultRest == 3, "result is %s != expect: 3" % queryResultRest + times += 1 + + assert times == 1, "result is %s != expect: 1" % times + + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py new file mode 100644 index 0000000000000000000000000000000000000000..772bb11df4e43171d5bffd3668a6124a0bf6bd12 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py @@ -0,0 +1,78 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/taosc_sample_use_ts.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb") + tdSql.checkData(0, 0, 24) + tdSql.query("select * from db.stb_0") + tdSql.checkRows(3) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 2) + tdSql.checkData(2, 1, 3) + tdSql.query("select distinct(t0) from db.stb") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 17) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py new file mode 100644 index 0000000000000000000000000000000000000000..fec93765297c715b2aa33846c0a9b15fccd7ec1f --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py 
@@ -0,0 +1,76 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_interlace.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb1)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from (select distinct(tbname) from db.stb2)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb1") + result = tdSql.getData(0, 0) + assert result <= 160, "result is %s > expect: 160" % result + tdSql.query("select count(*) from db.stb2") + result = tdSql.getData(0, 0) + assert result <= 160, "result is %s > expect: 160" % result + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py new file mode 100644 index 0000000000000000000000000000000000000000..557d2a4884f674c15e602a665836c46564e00d47 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py @@ -0,0 +1,104 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_json_alltypes.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb1") + tdSql.checkData(1, 1, "BOOL") + tdSql.query("describe db.stb2") + tdSql.checkData(1, 1, "TINYINT") + tdSql.query("describe db.stb3") + tdSql.checkData(1, 1, "SMALLINT") + tdSql.query("describe db.stb4") + tdSql.checkData(1, 1, "INT") + tdSql.query("describe db.stb5") + tdSql.checkData(1, 1, "BIGINT") + tdSql.query("describe db.stb6") + tdSql.checkData(1, 1, "FLOAT") + tdSql.query("describe db.stb7") + tdSql.checkData(1, 1, "DOUBLE") + tdSql.query("describe db.stb8") + tdSql.checkData(1, 1, "VARCHAR") + tdSql.checkData(1, 2, 16) + tdSql.query("describe db.stb9") + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 16) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb4") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb5") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb6") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb7") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb8") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb9") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py new file mode 100644 index 0000000000000000000000000000000000000000..8ff56b533902c70f9b2ac72b680446c14488fffc --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py @@ -0,0 +1,120 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_telnet_alltypes.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("describe db.stb1") + tdSql.checkData(1, 1, "BOOL") + tdSql.query("describe db.stb2") + tdSql.checkData(1, 1, "TINYINT") + tdSql.query("describe db.stb3") + tdSql.checkData(1, 1, "TINYINT UNSIGNED") + tdSql.query("describe db.stb4") + tdSql.checkData(1, 1, "SMALLINT") + tdSql.query("describe db.stb5") + tdSql.checkData(1, 1, "SMALLINT UNSIGNED") + tdSql.query("describe db.stb6") + tdSql.checkData(1, 1, "INT") + tdSql.query("describe db.stb7") + tdSql.checkData(1, 1, "INT UNSIGNED") + tdSql.query("describe db.stb8") + tdSql.checkData(1, 1, "BIGINT") + tdSql.query("describe db.stb9") + tdSql.checkData(1, 1, "BIGINT UNSIGNED") + tdSql.query("describe db.stb10") + tdSql.checkData(1, 1, "FLOAT") + tdSql.query("describe db.stb11") + tdSql.checkData(1, 1, "DOUBLE") + tdSql.query("describe db.stb12") + tdSql.checkData(1, 1, "VARCHAR") + tdSql.checkData(1, 2, 16) + tdSql.query("describe db.stb13") + tdSql.checkData(1, 1, "NCHAR") + tdSql.checkData(1, 2, 16) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb3") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb4") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb5") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb6") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb7") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb8") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb9") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb10") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb11") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb12") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from db.stb13") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + 
tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py new file mode 100644 index 0000000000000000000000000000000000000000..3f0a05b66590bf1af74cc35676a4cbf3caacfe72 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py @@ -0,0 +1,123 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +from util.taosadapter import * + + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + tAdapter.init("") + adapter_cfg = { + "influxdb": { + "enable": True + }, + "opentsdb": { + "enable": True + }, + "opentsdb_telnet": { + "enable": True, + "maxTCPConnection": 250, + "tcpKeepAlive": True, + "dbs": ["opentsdb_telnet", "collectd", "icinga2", "tcollector"], + "ports": [6046, 6047, 6048, 6049], + "user": "root", + "password": "taosdata" + } + } + # apply adapter_cfg and bring taosadapter up before the sml-rest runs below, as telnet_tcp.py does + tAdapter.update_cfg(adapter_cfg) + tAdapter.deploy() + tAdapter.start() + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_rest_telnet.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db.stb1)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from (select distinct(tbname) from db.stb2)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db.stb2") + tdSql.checkData(0, 0, 160) + + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_rest_line.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db2.stb1)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db2.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from (select distinct(tbname) from db2.stb2)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db2.stb2") + tdSql.checkData(0, 0, 160) + + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_rest_json.json" %binPath + 
tdLog.info("%s" % cmd) + os.system("%s" % cmd) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from db3.stb1)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db3.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from (select distinct(tbname) from db3.stb2)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from db3.stb2") + tdSql.checkData(0, 0, 160) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py b/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py new file mode 100644 index 0000000000000000000000000000000000000000..7aa0575f77625685631b691e578dcb365d94b1a6 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py @@ -0,0 +1,97 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- +import os +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +from util.taosadapter import * + +class TDTestCase: + def caseDescription(self): + ''' + [TD-11510] taosBenchmark test cases + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getPath(self, tool="taosBenchmark"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + tdLog.exit("taosBenchmark not found!") + return + else: + tdLog.info("taosBenchmark found in %s" % paths[0]) + return paths[0] + + def run(self): + tAdapter.init("") + adapter_cfg = { + "influxdb": { + "enable": True + }, + "opentsdb": { + "enable": True + }, + "opentsdb_telnet": { + "enable": True, + "maxTCPConnection": 250, + "tcpKeepAlive": True, + "dbs": ["opentsdb_telnet", "collectd", "icinga2", "tcollector"], + "ports": [6046, 6047, 6048, 6049], + "user": "root", + "password": "taosdata" + } + } + tAdapter.update_cfg(adapter_cfg) + tAdapter.deploy() + tAdapter.start() + binPath = self.getPath() + cmd = "%s -f ./5-taos-tools/taosbenchmark/json/sml_telnet_tcp.json" %binPath + tdLog.info("%s" % cmd) + os.system("%s" % cmd) + time.sleep(5) + tdSql.execute("reset query cache") + tdSql.query("select count(*) from (select distinct(tbname) from opentsdb_telnet.stb1)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from opentsdb_telnet.stb1") + tdSql.checkData(0, 0, 160) + tdSql.query("select count(*) from (select distinct(tbname) from opentsdb_telnet.stb2)") + tdSql.checkData(0, 0, 8) + tdSql.query("select count(*) from opentsdb_telnet.stb2") + tdSql.checkData(0, 0, 160) 
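+ + # The counts above follow from sml_telnet_tcp.json: each stb has 8 child + # tables with insert_rows 20, so 8 x 20 = 160 rows per super table; a quick + # manual spot check could be, e.g.: taos -s "select count(*) from opentsdb_telnet.stb1"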
+ + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py new file mode 100644 index 0000000000000000000000000000000000000000..33ba4034ec07925574c5c8bd23a2d7951010980c --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py @@ -0,0 +1,124 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import inspect  # reports the failing file/line in the checks below +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-14544] taosdump data inspect + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getPath(self, tool="taosdump"): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + paths = [] + for root, dirs, files in os.walk(projPath): + if ((tool) in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + paths.append(os.path.join(root, tool)) + break + if (len(paths) == 0): + return "" + return paths[0] + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 INT, c2 BOOL, c3 TINYINT, c4 SMALLINT, c5 BIGINT, c6 FLOAT, c7 DOUBLE, c8 TIMESTAMP, c9 BINARY(10), c10 NCHAR(10), c11 TINYINT UNSIGNED, c12 SMALLINT UNSIGNED, c13 INT UNSIGNED, c14 BIGINT UNSIGNED) tags(n1 INT, w2 BOOL, t3 TINYINT, t4 SMALLINT, t5 BIGINT, t6 FLOAT, t7 DOUBLE, t8 TIMESTAMP, t9 BINARY(10), t10 NCHAR(10), t11 TINYINT UNSIGNED, t12 SMALLINT UNSIGNED, t13 INT UNSIGNED, t14 BIGINT UNSIGNED)") + tdSql.execute( + "create table t1 using st tags(1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)") + tdSql.execute( + "insert into t1 values(1640000000000, 1, true, 1, 1, 1, 1.0, 1.0, 1, '1', '一', 1, 1, 1, 1)") + tdSql.execute( + "create table t2 using st tags(NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)") + tdSql.execute( + "insert into t2 values(1640000000000, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)") + +# sys.exit(1) + + binPath = self.getPath("taosdump") + if (binPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % binPath) + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%s --databases db -o %s -T 1" % + (binPath, self.tmpdir)) + +# sys.exit(1) + + taosdumpInspectCmd = 
"%s -I %s/*.avro* -s | grep 'Schema:'|wc -l" % ( + binPath, self.tmpdir) + schemaTimes = subprocess.check_output( + taosdumpInspectCmd, shell=True).decode("utf-8") + print("schema found times: %d" % int(schemaTimes)) + + if (int(schemaTimes) != 3): + caller = inspect.getframeinfo(inspect.stack()[0][0]) + tdLog.exit( + "%s(%d) failed: expected schema found times 3, actual %d" % + (caller.filename, caller.lineno, int(schemaTimes))) + + taosdumpInspectCmd = "%s -I %s/*.avro* | grep '=== Records:'|wc -l" % ( + binPath, self.tmpdir) + recordsTimes = subprocess.check_output( + taosdumpInspectCmd, shell=True).decode("utf-8") + print("records found times: %d" % int(recordsTimes)) + + if (int(recordsTimes) != 3): + caller = inspect.getframeinfo(inspect.stack()[0][0]) + tdLog.exit( + "%s(%d) failed: expected records found times 3, actual %d" % + (caller.filename, caller.lineno, int(recordsTimes))) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBigInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBigInt.py new file mode 100644 index 0000000000000000000000000000000000000000..82c17a459b11a27e7e6c08d6d26a460b772504b0 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBigInt.py @@ -0,0 +1,141 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports big int + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 BIGINT) tags(bntag BIGINT)") + tdSql.execute("create table t1 using st tags(1)") + tdSql.execute("insert into t1 values(1640000000000, 1)") + + tdSql.execute("create table t2 using st tags(9223372036854775807)") + tdSql.execute( + "insert into t2 values(1640000000000, 9223372036854775807)") + + tdSql.execute("create table t3 using st tags(-9223372036854775807)") + tdSql.execute( + "insert into t3 values(1640000000000, -9223372036854775807)") + + tdSql.execute("create 
table t4 using st tags(NULL)") + tdSql.execute("insert into t4 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(4) + + tdSql.query("select * from st where bntag = 1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 1) + + tdSql.query("select * from st where bntag = 9223372036854775807") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 9223372036854775807) + tdSql.checkData(0, 2, 9223372036854775807) + + tdSql.query("select * from st where bntag = -9223372036854775807") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, -9223372036854775807) + tdSql.checkData(0, 2, -9223372036854775807) + + tdSql.query("select * from st where bntag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBinary.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBinary.py new file mode 100644 index 0000000000000000000000000000000000000000..4909eb376222bbea7102208d4418d608b827fbbf --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBinary.py @@ -0,0 +1,127 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports binary + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 BINARY(5), c2 BINARY(5)) tags(btag BINARY(5))") + tdSql.execute("create table t1 using st tags('test')") + tdSql.execute("insert into t1 values(1640000000000, '01234', '56789')") + tdSql.execute("insert into t1 values(1640000000001, 'abcd', 'efgh')") + tdSql.execute("create table t2 using st tags(NULL)") + tdSql.execute("insert into t2 values(1640000000000, NULL, NULL)") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system("%staosdump --databases db -o %s" % (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(2) + tdSql.checkData(0, 0, 't2') + tdSql.checkData(1, 0, 't1') + + tdSql.query("select btag from st where tbname = 't1'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, "test") + + tdSql.query("select btag from st where tbname = 't2'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, None) + + tdSql.query("select * from st where btag = 'test'") + tdSql.checkRows(2) + tdSql.checkData(0, 1, "01234") + tdSql.checkData(0, 2, "56789") + tdSql.checkData(1, 1, "abcd") + tdSql.checkData(1, 2, "efgh") + + tdSql.query("select * from st where btag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBool.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBool.py new file mode 100644 index 
0000000000000000000000000000000000000000..138f7ba81c036c723bcf945cbce97c144d43db1b --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBool.py @@ -0,0 +1,130 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports bool + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 BOOL) tags(btag BOOL)") + tdSql.execute("create table t1 using st tags(true)") + tdSql.execute("insert into t1 values(1640000000000, true)") + tdSql.execute("create table t2 using st tags(false)") + tdSql.execute("insert into t2 values(1640000000000, false)") + tdSql.execute("create table t3 using st tags(NULL)") + tdSql.execute("insert into t3 values(1640000000000, NULL)") + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system("%staosdump --databases db -o %s" % (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 't3') + tdSql.checkData(1, 0, 't2') + tdSql.checkData(2, 0, 't1') + + tdSql.query("select btag from st") + tdSql.checkRows(3) + tdSql.checkData(0, 0, "False") + tdSql.checkData(1, 0, "True") + tdSql.checkData(2, 0, None) + + tdSql.query("select * from st where btag = 'true'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, "True") + tdSql.checkData(0, 2, "True") + + tdSql.query("select * from st where btag = 'false'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, "False") + tdSql.checkData(0, 2, "False") + + tdSql.query("select * from st where btag is null") + 
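+        # the NULL bool tag on t3 must survive the dump/restore round trip
+        # and still be reachable through the "is null" filter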
tdSql.checkRows(1) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeDouble.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeDouble.py new file mode 100644 index 0000000000000000000000000000000000000000..24ebb0fa77a4423773a9fedc996da51eba889b3f --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeDouble.py @@ -0,0 +1,158 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import math +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports double + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 DOUBLE) tags(dbtag DOUBLE)") + tdSql.execute("create table t1 using st tags(1.0)") + tdSql.execute("insert into t1 values(1640000000000, 1.0)") + + tdSql.execute("create table t2 using st tags(1.7E308)") + tdSql.execute("insert into t2 values(1640000000000, 1.7E308)") + + tdSql.execute("create table t3 using st tags(-1.7E308)") + tdSql.execute("insert into t3 values(1640000000000, -1.7E308)") + + tdSql.execute("create table t4 using st tags(NULL)") + tdSql.execute("insert into t4 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show 
tables") + tdSql.checkRows(4) + + tdSql.query("select * from st where dbtag = 1.0") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + if not math.isclose(tdSql.getData(0, 1), 1.0): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 1), 1.0)) + tdLog.exit("data is different") + if not math.isclose(tdSql.getData(0, 2), 1.0): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 2), 1.0)) + tdLog.exit("data is different") + + tdSql.query("select * from st where dbtag = 1.7E308") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + if not math.isclose(tdSql.getData(0, 1), 1.7E308): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 1), 1.7E308)) + tdLog.exit("data is different") + if not math.isclose(tdSql.getData(0, 2), 1.7E308): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 2), 1.7E308)) + tdLog.exit("data is different") + + tdSql.query("select * from st where dbtag = -1.7E308") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + if not math.isclose(tdSql.getData(0, 1), -1.7E308): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 1), -1.7E308)) + tdLog.exit("data is different") + if not math.isclose(tdSql.getData(0, 2), -1.7E308): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 2), -1.7E308)) + tdLog.exit("data is different") + + tdSql.query("select * from st where dbtag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeFloat.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeFloat.py new file mode 100644 index 0000000000000000000000000000000000000000..2ce42bb7718920211ab6c2e5e1a0fdcdb57a8fb7 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeFloat.py @@ -0,0 +1,160 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import math +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports float + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 FLOAT) tags(ftag FLOAT)") + tdSql.execute("create table t1 using st tags(1.0)") + tdSql.execute("insert into t1 values(1640000000000, 1.0)") + + tdSql.execute("create table t2 using st tags(3.40E+38)") + tdSql.execute("insert into t2 values(1640000000000, 3.40E+38)") + + tdSql.execute("create table t3 using st tags(-3.40E+38)") + tdSql.execute("insert into t3 values(1640000000000, -3.40E+38)") + + tdSql.execute("create table t4 using st tags(NULL)") + tdSql.execute("insert into t4 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(4) + + tdSql.query("select * from st where ftag = 1.0") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + if not math.isclose(tdSql.getData(0, 1), 1.0): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 1), 1.0)) + tdLog.exit("data is different") + if not math.isclose(tdSql.getData(0, 2), 1.0): + tdLog.exit("data is different") + + tdSql.query("select * from st where ftag = 3.4E38") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + if not math.isclose(tdSql.getData(0, 1), 3.4E38, + rel_tol=1e-07, abs_tol=0.0): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 1), 3.4E38)) + tdLog.exit("data is different") + if not math.isclose(tdSql.getData(0, 2), 3.4E38, + rel_tol=1e-07, abs_tol=0.0): + tdLog.debug("getData(0, 1): %f, to compare %f" % + 
(tdSql.getData(0, 2), 3.4E38)) + tdLog.exit("data is different") + + tdSql.query("select * from st where ftag = -3.4E38") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + if not math.isclose(tdSql.getData(0, 1), (-3.4E38), + rel_tol=1e-07, abs_tol=0.0): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 1), -3.4E38)) + tdLog.exit("data is different") + if not math.isclose(tdSql.getData(0, 2), (-3.4E38), + rel_tol=1e-07, abs_tol=0.0): + tdLog.debug("getData(0, 1): %f, to compare %f" % + (tdSql.getData(0, 2), -3.4E38)) + tdLog.exit("data is different") + + tdSql.query("select * from st where ftag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeInt.py new file mode 100644 index 0000000000000000000000000000000000000000..b6a24a6eee5cb01faf1b861eb1750a91d2587c3e --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeInt.py @@ -0,0 +1,136 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports int + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 INT) tags(ntag INT)") + tdSql.execute("create table t1 using st tags(1)") + tdSql.execute("insert into t1 values(1640000000000, 1)") + tdSql.execute("create table t2 using st tags(2147483647)") + tdSql.execute("insert into t2 values(1640000000000, 2147483647)") + tdSql.execute("create table t3 using st tags(-2147483647)") + tdSql.execute("insert into t3 values(1640000000000, -2147483647)") + tdSql.execute("create table t4 using st tags(NULL)") + tdSql.execute("insert into t4 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + 
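+            # taosdump was located under the build tree; its binary lives in build/bin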
tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(4) + + tdSql.query("select * from st where ntag = 1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 1) + + tdSql.query("select * from st where ntag = 2147483647") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 2147483647) + tdSql.checkData(0, 2, 2147483647) + + tdSql.query("select * from st where ntag = -2147483647") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, -2147483647) + tdSql.checkData(0, 2, -2147483647) + + tdSql.query("select * from st where ntag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py new file mode 100644 index 0000000000000000000000000000000000000000..cf0c7f4ac594faf8e30582bd205e126b5097b9f4 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py @@ -0,0 +1,128 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12362] taosdump supports JSON + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 int) tags(jtag JSON)") + tdSql.execute( + "create table t1 using st tags('{\"location\": \"beijing\"}')") + tdSql.execute("insert into t1 values(1500000000000, 1)") + + tdSql.execute( + "create table t2 using st tags(NULL)") + tdSql.execute("insert into t2 values(1500000000000, NULL)") + + tdSql.execute( + "create table t3 using st tags('')") + tdSql.execute("insert into t3 values(1500000000000, 0)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system("%staosdump --databases db -o %s -g" % (binPath, self.tmpdir)) + + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -g" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(3) + tdSql.checkData(0, 0, 't3') + + tdSql.query("select jtag->'location' from st") + tdSql.checkRows(3) + tdSql.checkData(0, 0, "\"beijing\"") + + tdSql.query("select * from st where jtag contains 'location'") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, '{\"location\":\"beijing\"}') + + tdSql.query("select jtag from st") + tdSql.checkRows(3) + tdSql.checkData(0, 0, "{\"location\":\"beijing\"}") + tdSql.checkData(1, 0, None) + tdSql.checkData(2, 0, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeSmallInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeSmallInt.py new file mode 100644 index 0000000000000000000000000000000000000000..2fc1ffb75e5d31d501024e1432a02f62a0fbd480 --- /dev/null +++ 
b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeSmallInt.py
@@ -0,0 +1,138 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import os
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.dnodes import *
+import subprocess
+
+
+class TDTestCase:
+    def caseDescription(self):
+        '''
+        case1: [TD-12526] taosdump supports small int
+        '''
+        return
+
+    def init(self, conn, logSql):
+        tdLog.debug("start to execute %s" % __file__)
+        tdSql.init(conn.cursor(), logSql)
+        self.tmpdir = "tmp"
+
+    def getBuildPath(self):
+        selfPath = os.path.dirname(os.path.realpath(__file__))
+
+        if ("community" in selfPath):
+            projPath = selfPath[:selfPath.find("community")]
+        else:
+            projPath = selfPath[:selfPath.find("tests")]
+
+        buildPath = ""
+        for root, dirs, files in os.walk(projPath):
+            if ("taosdump" in files):
+                rootRealPath = os.path.dirname(os.path.realpath(root))
+                if ("packaging" not in rootRealPath):
+                    buildPath = root[:len(root) - len("/build/bin")]
+                    break
+        return buildPath
+
+    def run(self):
+        tdSql.prepare()
+
+        tdSql.execute("drop database if exists db")
+        tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
+
+        tdSql.execute("use db")
+        tdSql.execute(
+            "create table st(ts timestamp, c1 SMALLINT) tags(sntag SMALLINT)")
+        tdSql.execute("create table t1 using st tags(1)")
+        tdSql.execute("insert into t1 values(1640000000000, 1)")
+
+        tdSql.execute("create table t2 using st tags(32767)")
+        tdSql.execute("insert into t2 values(1640000000000, 32767)")
+
+        tdSql.execute("create table t3 using st tags(-32767)")
+        tdSql.execute("insert into t3 values(1640000000000, -32767)")
+
+        tdSql.execute("create table t4 using st tags(NULL)")
+        tdSql.execute("insert into t4 values(1640000000000, NULL)")
+
+# sys.exit(1)
+
+        buildPath = self.getBuildPath()
+        if (buildPath == ""):
+            tdLog.exit("taosdump not found!")
+        else:
+            tdLog.info("taosdump found in %s" % buildPath)
+        binPath = buildPath + "/build/bin/"
+
+        if not os.path.exists(self.tmpdir):
+            os.makedirs(self.tmpdir)
+        else:
+            print("directory exists")
+            os.system("rm -rf %s" % self.tmpdir)
+            os.makedirs(self.tmpdir)
+
+        os.system(
+            "%staosdump --databases db -o %s -T 1" %
+            (binPath, self.tmpdir))
+
+# sys.exit(1)
+        tdSql.execute("drop database db")
+
+        os.system("%staosdump -i %s -T 1" % (binPath, self.tmpdir))
+
+        tdSql.query("show databases")
+        tdSql.checkRows(1)
+
+        tdSql.execute("use db")
+        tdSql.query("show stables")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 'st')
+
+        tdSql.query("show tables")
+        tdSql.checkRows(4)
+
+        tdSql.query("select * from st where sntag = 1")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 1640000000000)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(0, 2, 1)
+
+        tdSql.query("select * from st where sntag = 32767")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 1640000000000)
+        tdSql.checkData(0, 1, 32767)
+        tdSql.checkData(0, 2, 32767)
+
+        tdSql.query("select * from st where sntag = -32767")
+        tdSql.checkRows(1)
+        tdSql.checkData(0, 0, 1640000000000)
+        tdSql.checkData(0, 1, -32767)
+        tdSql.checkData(0, 2, -32767)
+
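+        # boundary values round-tripped correctly; finally verify the NULL tag row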
tdSql.query("select * from st where sntag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeTinyInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeTinyInt.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc18fcd01e2fd0c210954224268e2c673d33406 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeTinyInt.py @@ -0,0 +1,138 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports tiny int + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 TINYINT) tags(tntag TINYINT)") + tdSql.execute("create table t1 using st tags(1)") + tdSql.execute("insert into t1 values(1640000000000, 1)") + + tdSql.execute("create table t2 using st tags(127)") + tdSql.execute("insert into t2 values(1640000000000, 127)") + + tdSql.execute("create table t3 using st tags(-127)") + tdSql.execute("insert into t3 values(1640000000000, -127)") + + tdSql.execute("create table t4 using st tags(NULL)") + tdSql.execute("insert into t4 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 
0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(4) + + tdSql.query("select * from st where tntag = 1") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 1) + tdSql.checkData(0, 2, 1) + + tdSql.query("select * from st where tntag = 127") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 127) + tdSql.checkData(0, 2, 127) + + tdSql.query("select * from st where tntag = -127") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, -127) + tdSql.checkData(0, 2, -127) + + tdSql.query("select * from st where tntag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py new file mode 100644 index 0000000000000000000000000000000000000000..1a6e9a69d9b19365c791f7840f0782a5ef5231c7 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py @@ -0,0 +1,128 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12655] taosdump supports unsigned big int + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 BIGINT UNSIGNED) tags(ubntag BIGINT UNSIGNED)") + tdSql.execute("create table t1 using st tags(0)") + tdSql.execute("insert into t1 values(1640000000000, 0)") + tdSql.execute("create table t2 using st tags(18446744073709551614)") + tdSql.execute("insert into t2 values(1640000000000, 18446744073709551614)") + tdSql.execute("create table t3 using st tags(NULL)") + tdSql.execute("insert into t3 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = 
buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1 -g" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1 -g" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(3) + + tdSql.query("select * from st where ubntag = 0") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 0) + tdSql.checkData(0, 2, 0) + + tdSql.query("select * from st where ubntag = 18446744073709551614") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 18446744073709551614) + tdSql.checkData(0, 2, 18446744073709551614) + + tdSql.query("select * from st where ubntag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py new file mode 100644 index 0000000000000000000000000000000000000000..e71650bc8a09b91c6eabe709990b0dc01782d949 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py @@ -0,0 +1,128 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports unsigned int + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 INT UNSIGNED) tags(untag INT UNSIGNED)") + tdSql.execute("create table t1 using st tags(0)") + tdSql.execute("insert into t1 values(1640000000000, 0)") + tdSql.execute("create table t2 using st tags(4294967294)") + tdSql.execute("insert into t2 values(1640000000000, 4294967294)") + tdSql.execute("create table t3 using st tags(NULL)") + tdSql.execute("insert into t3 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1 -g" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1 -g" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(3) + + tdSql.query("select * from st where untag = 0") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 0) + tdSql.checkData(0, 2, 0) + + tdSql.query("select * from st where untag = 4294967294") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 4294967294) + tdSql.checkData(0, 2, 4294967294) + + tdSql.query("select * from st where untag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py new file mode 100644 index 
0000000000000000000000000000000000000000..d05a397c3649610dc9569c3ac32a4fb9fe189800 --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py @@ -0,0 +1,128 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports unsigned small int + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 SMALLINT UNSIGNED) tags(usntag SMALLINT UNSIGNED)") + tdSql.execute("create table t1 using st tags(0)") + tdSql.execute("insert into t1 values(1640000000000, 0)") + tdSql.execute("create table t2 using st tags(65534)") + tdSql.execute("insert into t2 values(1640000000000, 65534)") + tdSql.execute("create table t3 using st tags(NULL)") + tdSql.execute("insert into t3 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1 -g" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1 -g" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(3) + + tdSql.query("select * from st where usntag = 0") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 0) + tdSql.checkData(0, 2, 0) + + tdSql.query("select * from st where usntag = 65534") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 65534) + tdSql.checkData(0, 2, 65534) + + tdSql.query("select * from st where usntag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + 
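+    # note: 65534 is used as the upper bound above; TDengine documents the
+    # SMALLINT UNSIGNED range as [0, 65534] (65535 appears to be reserved
+    # internally as the NULL marker, so it is deliberately not tested)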
def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py new file mode 100644 index 0000000000000000000000000000000000000000..9995d3812bfb44c0f5812db5b8fafbb576dbb86b --- /dev/null +++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py @@ -0,0 +1,128 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def caseDescription(self): + ''' + case1: [TD-12526] taosdump supports unsigned tiny int + ''' + return + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + self.tmpdir = "tmp" + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + buildPath = "" + for root, dirs, files in os.walk(projPath): + if ("taosdump" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + + tdSql.execute("drop database if exists db") + tdSql.execute("create database db days 11 keep 3649 blocks 8 ") + + tdSql.execute("use db") + tdSql.execute( + "create table st(ts timestamp, c1 TINYINT UNSIGNED) tags(utntag TINYINT UNSIGNED)") + tdSql.execute("create table t1 using st tags(0)") + tdSql.execute("insert into t1 values(1640000000000, 0)") + tdSql.execute("create table t2 using st tags(254)") + tdSql.execute("insert into t2 values(1640000000000, 254)") + tdSql.execute("create table t3 using st tags(NULL)") + tdSql.execute("insert into t3 values(1640000000000, NULL)") + +# sys.exit(1) + + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosdump not found!") + else: + tdLog.info("taosdump found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + + if not os.path.exists(self.tmpdir): + os.makedirs(self.tmpdir) + else: + print("directory exists") + os.system("rm -rf %s" % self.tmpdir) + os.makedirs(self.tmpdir) + + os.system( + "%staosdump --databases db -o %s -T 1 -g" % + (binPath, self.tmpdir)) + +# sys.exit(1) + tdSql.execute("drop database db") + + os.system("%staosdump -i %s -T 1 -g" % (binPath, self.tmpdir)) + + tdSql.query("show databases") + tdSql.checkRows(1) + + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 'st') + + tdSql.query("show tables") + tdSql.checkRows(3) + + tdSql.query("select * from st where utntag = 0") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 0) + 
tdSql.checkData(0, 2, 0) + + tdSql.query("select * from st where utntag = 254") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1640000000000) + tdSql.checkData(0, 1, 254) + tdSql.checkData(0, 2, 254) + + tdSql.query("select * from st where utntag is null") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 0) + tdSql.checkData(0, 1, None) + tdSql.checkData(0, 2, None) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/develop-test/fulltest.sh b/tests/develop-test/fulltest.sh new file mode 100644 index 0000000000000000000000000000000000000000..69cade3855b087fc7638eea22b4926d088b5d86b --- /dev/null +++ b/tests/develop-test/fulltest.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -e +set -x + +python3 ./test.py -f 5-taos-tools/taosbenchmark/auto_create_table_json.py +#python3 ./test.py -f 5-taos-tools/taosbenchmark/commandline.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/custom_col_tag.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/default_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/demo.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/insert_alltypes_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/invalid_commandline.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/json_tag.py +#python3 ./test.py -f 5-taos-tools/taosbenchmark/limit_offset_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/query_json.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/sample_csv_json.py +#python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_interlace.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_json_alltypes.py +#python3 ./test.py -f 5-taos-tools/taosbenchmark/sml_telnet_alltypes.py +#python3 ./test.py -f 5-taos-tools/taosbenchmark/taosadapter_json.py +#python3 ./test.py -f 5-taos-tools/taosbenchmark/telnet_tcp.py diff --git a/tests/develop-test/test.py b/tests/develop-test/test.py new file mode 100644 index 0000000000000000000000000000000000000000..5dc6139410bf7d53da8e8fd36e047bb81a7e5eae --- /dev/null +++ b/tests/develop-test/test.py @@ -0,0 +1,496 @@ +#!/usr/bin/python +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+# install pip
+# pip install src/connector/python/
+
+# -*- coding: utf-8 -*-
+import sys
+import os
+import getopt
+import subprocess
+import time
+import base64
+import json
+import platform
+import socket
+import threading
+import importlib
+
+import toml
+sys.path.append("../pytest")
+from util.log import *
+from util.dnodes import *
+from util.cases import *
+from util.cluster import *
+from util.taosadapter import *
+
+import taos
+import taosrest
+
+def checkRunTimeError():
+    import win32gui
+    timeCount = 0
+    while 1:
+        time.sleep(1)
+        timeCount = timeCount + 1
+        print("checkRunTimeError", timeCount)
+        if (timeCount > 600):
+            print("stop the test.")
+            os.system("TASKKILL /F /IM taosd.exe")
+            os.system("TASKKILL /F /IM taos.exe")
+            os.system("TASKKILL /F /IM tmq_sim.exe")
+            os.system("TASKKILL /F /IM mintty.exe")
+            os.system("TASKKILL /F /IM python.exe")
+            quit(0)
+        hwnd = win32gui.FindWindow(None, "Microsoft Visual C++ Runtime Library")
+        if hwnd:
+            os.system("TASKKILL /F /IM taosd.exe")
+
+if __name__ == "__main__":
+
+    fileName = "all"
+    deployPath = ""
+    masterIp = ""
+    testCluster = False
+    valgrind = 0
+    killValgrind = 1
+    logSql = True
+    stop = 0
+    restart = False
+    dnodeNums = 1
+    mnodeNums = 0
+    updateCfgDict = {}
+    adapter_cfg_dict = {}
+    execCmd = ""
+    queryPolicy = 1
+    createDnodeNums = 1
+    restful = False
+    opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scghrd:k:e:N:M:Q:C:RD:', [
+        'file=', 'path=', 'master', 'logSql', 'stop', 'cluster', 'valgrind', 'help', 'restart', 'updateCfgDict', 'killv', 'execCmd', 'dnodeNums', 'mnodeNums', 'queryPolicy', 'createDnodeNums', 'restful', 'adaptercfgupdate'])
+    for key, value in opts:
+        if key in ['-h', '--help']:
+            tdLog.printNoPrefix(
+                'A collection of test cases written using Python')
+            tdLog.printNoPrefix('-f Name of test case file written by Python')
+            tdLog.printNoPrefix('-p Deploy Path for Simulator')
+            tdLog.printNoPrefix('-m Master Ip for Simulator')
+            tdLog.printNoPrefix('-l logSql Flag')
+            tdLog.printNoPrefix('-s stop All dnodes')
+            tdLog.printNoPrefix('-c Test Cluster Flag')
+            tdLog.printNoPrefix('-g valgrind Test Flag')
+            tdLog.printNoPrefix('-r taosd restart test')
+            tdLog.printNoPrefix('-d update cfg dict, base64 json str')
+            tdLog.printNoPrefix('-k do not kill valgrind process')
+            tdLog.printNoPrefix('-e eval str to run')
+            tdLog.printNoPrefix('-N start dnodes numbers in clusters')
+            tdLog.printNoPrefix('-M create mnode numbers in clusters')
+            tdLog.printNoPrefix('-Q set queryPolicy in one dnode')
+            tdLog.printNoPrefix('-C create Dnode Numbers in one cluster')
+            tdLog.printNoPrefix('-R use restful (taosadapter) connection')
+            tdLog.printNoPrefix('-D taosadapter update cfg dict')
+
+            sys.exit(0)
+
+        if key in ['-r', '--restart']:
+            restart = True
+
+        if key in ['-f', '--file']:
+            fileName = value
+
+        if key in ['-p', '--path']:
+            deployPath = value
+
+        if key in ['-m', '--master']:
+            masterIp = value
+
+        if key in ['-l', '--logSql']:
+            if (value.upper() == "TRUE"):
+                logSql = True
+            elif (value.upper() == "FALSE"):
+                logSql = False
+            else:
+                tdLog.printNoPrefix("logSql value %s is invalid" % value)
+                sys.exit(0)
+
+        if key in ['-c', '--cluster']:
+            testCluster = True
+
+        if key in ['-g', '--valgrind']:
+            valgrind = 1
+
+        if key in ['-s', '--stop']:
+            stop = 1
+
+        if key in ['-d', '--updateCfgDict']:
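+            # -d carries a base64-encoded Python dict literal; for example the
+            # output of base64.b64encode(b"{'serverPort': 6030}") would be a
+            # valid payload (the key shown is only an illustrative assumption)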
+ try: + updateCfgDict = eval(base64.b64decode(value.encode()).decode()) + except: + print('updateCfgDict convert fail.') + sys.exit(0) + + if key in ['-k', '--killValgrind']: + killValgrind = 0 + + if key in ['-e', '--execCmd']: + try: + execCmd = base64.b64decode(value.encode()).decode() + except: + print('execCmd run fail.') + sys.exit(0) + + if key in ['-N', '--dnodeNums']: + dnodeNums = value + + if key in ['-M', '--mnodeNums']: + mnodeNums = value + + if key in ['-Q', '--queryPolicy']: + queryPolicy = value + + if key in ['-C', '--createDnodeNums']: + createDnodeNums = value + + if key in ['-R', '--restful']: + restful = True + + if key in ['-D', '--adaptercfgupdate']: + try: + adaptercfgupdate = eval(base64.b64decode(value.encode()).decode()) + except: + print('adapter cfg update convert fail.') + sys.exit(0) + + if not execCmd == "": + if restful: + tAdapter.init(deployPath) + else: + tdDnodes.init(deployPath) + print(execCmd) + exec(execCmd) + quit() + + if (stop != 0): + if (valgrind == 0): + toBeKilled = "taosd" + else: + toBeKilled = "valgrind.bin" + + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled + + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output(psCmd, shell=True) + + while(processID): + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True) + + for port in range(6030, 6041): + usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port + processID = subprocess.check_output(usePortPID, shell=True) + + if processID: + killCmd = "kill -TERM %s" % processID + os.system(killCmd) + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + if valgrind: + time.sleep(2) + + if restful: + toBeKilled = "taosadapter" + + killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -TERM > /dev/null 2>&1" % toBeKilled + + psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled + processID = subprocess.check_output(psCmd, shell=True) + + while(processID): + os.system(killCmd) + time.sleep(1) + processID = subprocess.check_output(psCmd, shell=True) + + for port in range(6030, 6041): + usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port + processID = subprocess.check_output(usePortPID, shell=True) + + if processID: + killCmd = "kill -TERM %s" % processID + os.system(killCmd) + fuserCmd = "fuser -k -n tcp %d" % port + os.system(fuserCmd) + + tdLog.info('stop taosadapter') + + tdLog.info('stop All dnodes') + + if masterIp == "": + host = socket.gethostname() + else: + try: + config = eval(masterIp) + host = config["host"] + except Exception as r: + host = masterIp + + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + if platform.system().lower() == 'windows': + fileName = fileName.replace("/", os.sep) + if (masterIp == "" and not fileName == "0-others\\udf_create.py"): + threading.Thread(target=checkRunTimeError,daemon=True).start() + tdLog.info("Procedures for testing self-deployment") + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + key_word = 'tdCases.addWindows' + is_test_framework = 0 + try: + if key_word in open(fileName, encoding='UTF-8').read(): + is_test_framework = 1 + except Exception as r: + print(r) + updateCfgDictStr = '' + # adapter_cfg_dict_str = '' + if is_test_framework: + moduleName = fileName.replace(".py", "").replace(os.sep, ".") + uModule = 
importlib.import_module(moduleName) + try: + ucase = uModule.TDTestCase() + if ((json.dumps(updateCfgDict) == '{}') and hasattr(ucase, 'updatecfgDict')): + updateCfgDict = ucase.updatecfgDict + updateCfgDictStr = "-d %s"%base64.b64encode(json.dumps(updateCfgDict).encode()).decode() + if ((json.dumps(adapter_cfg_dict) == '{}') and hasattr(ucase, 'taosadapter_cfg_dict')): + adapter_cfg_dict = ucase.taosadapter_cfg_dict + # adapter_cfg_dict_str = f"-D {base64.b64encode(toml.dumps(adapter_cfg_dict).encode()).decode()}" + except Exception as r: + print(r) + else: + pass + if restful: + tAdapter.init(deployPath, masterIp) + tAdapter.stop(force_kill=True) + + if dnodeNums == 1 : + tdDnodes.deploy(1,updateCfgDict) + tdDnodes.start(1) + tdCases.logSql(logSql) + if restful: + tAdapter.deploy(adapter_cfg_dict) + tAdapter.start() + + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + if restful: + conn = taosrest.connect(url=f"http://{host}:6041") + else: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + + cursor = conn.cursor() + cursor.execute("create qnode on dnode 1") + cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"') + cursor.execute("show local variables") + res = cursor.fetchall() + for i in range(cursor.rowcount): + if res[i][0] == "queryPolicy" : + if int(res[i][1]) == int(queryPolicy): + tdLog.success(f'alter queryPolicy to {queryPolicy} successfully') + else: + tdLog.debug(res) + tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") + else : + tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums)) + dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums) + tdDnodes = ClusterDnodes(dnodeslist) + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + for dnode in tdDnodes.dnodes: + tdDnodes.deploy(dnode.index,{}) + for dnode in tdDnodes.dnodes: + tdDnodes.starttaosd(dnode.index) + tdCases.logSql(logSql) + + if restful: + tAdapter.deploy(adapter_cfg_dict) + tAdapter.start() + + if not restful: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + else: + conn = taosrest.connect(url=f"http://{host}:6041") + # tdLog.info(tdDnodes.getSimCfgPath(),host) + if createDnodeNums == 1: + createDnodeNums=dnodeNums + else: + createDnodeNums=createDnodeNums + cluster.create_dnode(conn,createDnodeNums) + try: + if cluster.check_dnode(conn) : + print("check dnode ready") + except Exception as r: + print(r) + if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True: + conn = None + else: + if not restful: + conn = taos.connect(host="%s"%(host), config=tdDnodes.sim.getCfgDir()) + else: + conn = taosrest.connect(url=f"http://{host}:6041") + if is_test_framework: + tdCases.runOneWindows(conn, fileName) + else: + tdCases.runAllWindows(conn) + else: + tdDnodes.setKillValgrind(killValgrind) + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + is_test_framework = 0 + key_word = 'tdCases.addLinux' + try: + if key_word in open(fileName).read(): + is_test_framework = 1 + except: + pass + if is_test_framework: + moduleName = fileName.replace(".py", "").replace("/", ".") + uModule = importlib.import_module(moduleName) + try: + ucase = uModule.TDTestCase() + if (json.dumps(updateCfgDict) == '{}'): + updateCfgDict = ucase.updatecfgDict + if (json.dumps(adapter_cfg_dict) == '{}'): + adapter_cfg_dict = ucase.taosadapter_cfg_dict + except: + 
pass + + if restful: + tAdapter.init(deployPath, masterIp) + tAdapter.stop(force_kill=True) + + if dnodeNums == 1 : + tdDnodes.deploy(1,updateCfgDict) + tdDnodes.start(1) + tdCases.logSql(logSql) + + if restful: + tAdapter.deploy(adapter_cfg_dict) + tAdapter.start() + + if queryPolicy != 1: + queryPolicy=int(queryPolicy) + if not restful: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + else: + conn = taosrest.connect(url=f"http://{host}:6041") + # tdSql.init(conn.cursor()) + # tdSql.execute("create qnode on dnode 1") + # tdSql.execute('alter local "queryPolicy" "%d"'%queryPolicy) + # tdSql.query("show local variables;") + # for i in range(tdSql.queryRows): + # if tdSql.queryResult[i][0] == "queryPolicy" : + # if int(tdSql.queryResult[i][1]) == int(queryPolicy): + # tdLog.success('alter queryPolicy to %d successfully'%queryPolicy) + # else : + # tdLog.debug(tdSql.queryResult) + # tdLog.exit("alter queryPolicy to %d failed"%queryPolicy) + + cursor = conn.cursor() + cursor.execute("create qnode on dnode 1") + cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"') + cursor.execute("show local variables") + res = cursor.fetchall() + for i in range(cursor.rowcount): + if res[i][0] == "queryPolicy" : + if int(res[i][1]) == int(queryPolicy): + tdLog.success(f'alter queryPolicy to {queryPolicy} successfully') + else: + tdLog.debug(res) + tdLog.exit(f"alter queryPolicy to {queryPolicy} failed") + + else : + tdLog.debug("create an cluster with %s nodes and make %s dnode as independent mnode"%(dnodeNums,mnodeNums)) + dnodeslist = cluster.configure_cluster(dnodeNums=dnodeNums,mnodeNums=mnodeNums) + tdDnodes = ClusterDnodes(dnodeslist) + tdDnodes.init(deployPath, masterIp) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + tdDnodes.stopAll() + for dnode in tdDnodes.dnodes: + tdDnodes.deploy(dnode.index,{}) + for dnode in tdDnodes.dnodes: + tdDnodes.starttaosd(dnode.index) + tdCases.logSql(logSql) + + if restful: + tAdapter.deploy(adapter_cfg_dict) + tAdapter.start() + + if not restful: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + else: + conn = taosrest.connect(url=f"http://{host}:6041") + print(tdDnodes.getSimCfgPath(),host) + if createDnodeNums == 1: + createDnodeNums=dnodeNums + else: + createDnodeNums=createDnodeNums + cluster.create_dnode(conn,createDnodeNums) + try: + if cluster.check_dnode(conn) : + print("check dnode ready") + except Exception as r: + print(r) + + + if testCluster: + tdLog.info("Procedures for testing cluster") + if fileName == "all": + tdCases.runAllCluster() + else: + tdCases.runOneCluster(fileName) + else: + tdLog.info("Procedures for testing self-deployment") + if not restful: + conn = taos.connect(host,config=tdDnodes.getSimCfgPath()) + else: + conn = taosrest.connect(url=f"http://{host}:6041") + + if fileName == "all": + tdCases.runAllLinux(conn) + else: + tdCases.runOneLinux(conn, fileName) + + if restart: + if fileName == "all": + tdLog.info("not need to query ") + else: + sp = fileName.rsplit(".", 1) + if len(sp) == 2 and sp[1] == "py": + tdDnodes.stopAll() + tdDnodes.start(1) + time.sleep(1) + if not restful: + conn = taos.connect( host, config=tdDnodes.getSimCfgPath()) + else: + conn = taosrest.connect(url=f"http://{host}:6041") + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + tdLog.info("query test after taosd restart") + tdCases.runOneLinux(conn, sp[0] + "_" + "restart.py") + else: + tdLog.info("not need to query") + + if conn is not None: + conn.close() + sys.exit(0) diff --git 
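Usage note for the runner above: the -d/--updateCfgDict and -e/--execCmd values are base64 strings that test.py decodes (and eval()s or exec()s) as shown. A minimal sketch of how a caller could produce them follows; it assumes only the Python standard library, and the "supportVnodes" key is purely illustrative, not part of this patch.

    # sketch only, not part of the patch
    import base64
    import json

    update_cfg = {"supportVnodes": 1024}  # hypothetical dnode cfg override
    # test.py eval()s the decoded text, so a dict literal produced by
    # json.dumps() (numbers/strings only, no true/false/null) round-trips.
    d_arg = base64.b64encode(json.dumps(update_cfg).encode()).decode()

    exec_cmd = "print('executed via -e')"  # exec()'d by test.py, which then quits
    e_arg = base64.b64encode(exec_cmd.encode()).decode()

    print("python3 test.py -d %s" % d_arg)
    print("python3 test.py -e %s" % e_arg)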
diff --git a/tests/parallel_test/collect_cases.sh b/tests/parallel_test/collect_cases.sh
index bc942263d0c1fc713ee2f4319542f509a32c1425..3294beebc11ce012b1b89a6b7f0f796a51acaa92 100755
--- a/tests/parallel_test/collect_cases.sh
+++ b/tests/parallel_test/collect_cases.sh
@@ -40,6 +40,7 @@ else
 fi
 cat ../script/jenkins/basic.txt |grep -v "^#"|grep -v "^$"|sed "s/^/,,script,/" >>$case_file
 grep "^python" ../system-test/fulltest.sh |sed "s/^/,,system-test,/" >>$case_file
+grep "^python" ../develop-test/fulltest.sh |sed "s/^/,,develop-test,/" >>$case_file
 
 # tar source code for run.sh to use
 # if [ $ent -eq 0 ]; then
diff --git a/tests/pytest/cluster/TD-3693/insert1Data.json b/tests/pytest/cluster/TD-3693/insert1Data.json
index 3ac289a63a846c7de117ce6171ad023ca3f56211..6900ce0366971a71a0e119f0b7cfc363f78cd656 100644
--- a/tests/pytest/cluster/TD-3693/insert1Data.json
+++ b/tests/pytest/cluster/TD-3693/insert1Data.json
@@ -18,19 +18,19 @@
 "name": "db1",
 "drop": "yes",
 "replica": 1,
- "days": 10,
+ "cache": 50,
- "blocks": 8,
+ "precision": "ms",
 "keep": 3650,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/cluster/TD-3693/insert2Data.json b/tests/pytest/cluster/TD-3693/insert2Data.json
index 25717df4c76f59e8ef7d638c8793a391ff338a7c..e55fa996fb5099ba7d0702172671bb489ec28213 100644
--- a/tests/pytest/cluster/TD-3693/insert2Data.json
+++ b/tests/pytest/cluster/TD-3693/insert2Data.json
@@ -18,19 +18,19 @@
 "name": "db2",
 "drop": "yes",
 "replica": 1,
- "days": 10,
+ "cache": 50,
- "blocks": 8,
+ "precision": "ms",
 "keep": 3650,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/dockerCluster/insert.json b/tests/pytest/dockerCluster/insert.json
index 2f3cf0f0d9c98abdb31c19ad833098e23e0541f2..32e1043c4e722c379d2256ed6bb7d7a11bd7a8da 100644
--- a/tests/pytest/dockerCluster/insert.json
+++ b/tests/pytest/dockerCluster/insert.json
@@ -15,17 +15,17 @@
 "drop": "no",
 "replica": 1,
 "days": 2,
- "cache": 16,
- "blocks": 8,
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
 },
 "super_tables": [{
 "name": "stb",
diff --git a/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json b/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json
index b2755823ef3e205fe74b16e29dadf0773549a3cf..dc9de1626a4da72ad0dda91a3b42191ff27b165b 100644
--- a/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json
+++ b/tests/pytest/manualTest/TD-5114/insertDataDb3Replica2.json
@@ -18,19 +18,19 @@
 "name": "db3",
 "drop": "yes",
 "replica": 2,
- "days": 10,
+ "cache": 50,
- "blocks": 8,
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/query/nestedQuery/insertData.json b/tests/pytest/query/nestedQuery/insertData.json
index d4ef8dbe97ca144f59c0b1c961fe930bfcdbfcb2..1aad170bb0d2f1a986d5ed7aac20b53f6456a794 100644
--- a/tests/pytest/query/nestedQuery/insertData.json
+++ b/tests/pytest/query/nestedQuery/insertData.json
@@ -18,19 +18,19 @@
 "name": "db",
 "drop": "yes",
 "replica": 1,
- "days": 10,
+ "cache": 50,
- "blocks": 8,
+ "precision":
"ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/insert-interlace.json b/tests/pytest/tools/insert-interlace.json index a5c545d1599ee742cf94a4bc592bf76abe792ae5..0e17edf8fdc90379c93a08b861417c4fd5411d49 100644 --- a/tests/pytest/tools/insert-interlace.json +++ b/tests/pytest/tools/insert-interlace.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json index 9220bc1d17a0ead401c2adaf1e9d3a1455e2db00..bbac60872ef3e9341b69adeb0f6a4e67fb297ad8 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-createdb.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-createdb.json @@ -17,19 +17,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json index 164d4fe8be99720e291ab3cf745765af92c1f23f..8f795338d25c05f21310bab7d020d436b4009e1a 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json +++ b/tests/pytest/tools/insert-tblimit-tboffset-insertrec.json @@ -17,19 +17,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/insert-tblimit-tboffset.json b/tests/pytest/tools/insert-tblimit-tboffset.json index 0b8e0bd6c550a163bcfe0500a43e88b84e2d27ae..2c2d86c4816e6cf6c9f3469e92b7b2a2f750ab66 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset.json +++ b/tests/pytest/tools/insert-tblimit-tboffset.json @@ -17,19 +17,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/insert-tblimit-tboffset0.json b/tests/pytest/tools/insert-tblimit-tboffset0.json index 55d9e1905592e8e93d6d32a5fc159461c8b0fcb2..ce83ea3e606f80c38f247a44bccf61fc1394329b 100644 --- a/tests/pytest/tools/insert-tblimit-tboffset0.json +++ b/tests/pytest/tools/insert-tblimit-tboffset0.json @@ -17,19 +17,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/insert-tblimit1-tboffset.json 
b/tests/pytest/tools/insert-tblimit1-tboffset.json
index 3a886656617be1e0d38cfef262fae9159eee5227..b15aaf4eed2870468f43d49f0f6578c2d91dc528 100644
--- a/tests/pytest/tools/insert-tblimit1-tboffset.json
+++ b/tests/pytest/tools/insert-tblimit1-tboffset.json
@@ -17,19 +17,19 @@
 "name": "db",
 "drop": "no",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb",
diff --git a/tests/pytest/tools/insert.json b/tests/pytest/tools/insert.json
index 996b91ed06f283fdcd968df9cafc4f58583cbb8d..523561dc6d22cec1152d0e698976b0f8a5cf66c5 100644
--- a/tests/pytest/tools/insert.json
+++ b/tests/pytest/tools/insert.json
@@ -13,11 +13,11 @@
 "name": "db01",
 "drop": "yes",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
- "update": 0,
+
 "maxtablesPerVnode": 1000
 },
 "super_tables": [{
diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json
index 8bd5ddbae8d5ce81269626165b2d275d05135ea5..a11261681a78b4edc85280c666d98db86f370d94 100644
--- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json
+++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertMSDB.json
@@ -18,19 +18,19 @@
 "name": "testdb3",
 "drop": "yes",
 "replica": 1,
- "days": 10,
+ "cache": 50,
- "blocks": 8,
+ "precision": "ms",
 "keep": 3600,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json
index 5408a9841ab8a40e4ca7564724b7f6c7f941e0e0..080231551e306a458a4664adb7f9a68df63a1d52 100644
--- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json
+++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertNanoDB.json
@@ -18,19 +18,19 @@
 "name": "testdb1",
 "drop": "yes",
 "replica": 1,
- "days": 10,
+ "cache": 50,
- "blocks": 8,
+ "precision": "ns",
 "keep": 3600,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json
index 13eb80f3cf7f751398babed8e922f9e5b3a4242e..fe0ecbe2deed56e8ab2c90fc655ff92833215de7 100644
--- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json
+++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoInsertUSDB.json
@@ -18,19 +18,19 @@
 "name": "testdb2",
 "drop": "yes",
 "replica": 1,
- "days": 10,
+ "cache": 50,
- "blocks": 8,
+ "precision": "us",
 "keep": 3600,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json
index 38ac666fac5097d616c17bdfc7e900256827ddf4..1af2952a6940bc78dcc589184f599f5a7d640f1d 100644
---
a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabase.json @@ -18,19 +18,19 @@ "name": "nsdb", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 3600, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json index 9ef4a0af66e852a01d8ca7d677de4467ea316097..39c5e499096bd6082090f74f2c307629a18f56e2 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseInsertForSub.json @@ -18,19 +18,19 @@ "name": "subnsdb", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 3600, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json index a09dec21fa9cf3720b68a1fa2e843b49be0544ee..f4dbf1ee411377af6c3779d9e5cba6c3e233ed39 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabaseNow.json @@ -18,19 +18,19 @@ "name": "nsdb2", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 3600, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json index e99c528c6d62e3b9ce59565e60d21fb562bb836d..84b511a44621d89b2f23f7fabe38fe0cac489ac6 100644 --- a/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json +++ b/tests/pytest/tools/taosdemoAllTest/NanoTestCase/taosdemoTestNanoDatabasecsv.json @@ -18,19 +18,19 @@ "name": "nsdbcsv", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 3600, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json index 5e53bd7e7d10edea9bdbc56ef9ab737dbb34684e..75dbcb443230f9528530962242aff1a3a4ac4789 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-3453/query-interrupt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff 
--git a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json index ad85f9607b72c5d4562266508bfdcf68837c33bd..0c2e9cf34ae9a7529d9430655c67594cb0202114 100644 --- a/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-4985/query-limit-offset.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 36500, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json index 25af3a1041dbcd06319dd6abfeb82fd33240c013..e90474e872b050ccb33c4e40da76d86f14975b7a 100755 --- a/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/TD-5213/insertSigcolumnsNum4096.json @@ -18,19 +18,19 @@ "name": "json", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb_old", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json index c67582fb56288c978a4d86d7e862ee29f95f820c..21603b190272519373b5771616ad3679892653a5 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tnt1r.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json index e3db5476b8d4cdb7cc8ea125fa0557b133b1c0b8..c944c26915063c9e5169f8bb45442f87f47db423 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-1s1tntmr.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json index 0ae3a7194f8320b3919f850e19861f7796d2a5cc..4908d3999cad2037a6ce90b9ab85ddcf69df2ddd 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-disorder.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-disorder.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json 
b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json index 3ac8882699b11e62aa7486b6076f99b1c5b005d2..03f531f52b74605bd101b246a9ad0b4cb4dbb7ff 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-N00.json @@ -17,19 +17,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json index ffa1c91b82db978bc14392126edbf6972bcf2481..ce2a34627b68780105bbc0a6c233c8d8365b8569 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-drop-exist-auto-Y00.json @@ -17,19 +17,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-illegal.json b/tests/pytest/tools/taosdemoAllTest/insert-illegal.json index 614402236ac2e1efa48d2647966f0c1cc425f475..6e438b33df5af7321cd40b125cee553f98032b02 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-illegal.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-illegal.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json index 26e8b7e88dabecade8dd4f983976347380ea3830..54e646a5a049d36d83b1e6e56856ff1dda6aaa46 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-interlace-row.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json index 38975a75a7f1041ffec91d597c9fb28d8a95c7ce..9a47a873dddaebb4710827b3cb60840252d62f4c 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-interval-speed.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json index 
1a19ea00acb50a0140f55bde51ffe53429a099f0..2eb17b1aab5cf26a1cbde8456000a19dd1bef926 100644
--- a/tests/pytest/tools/taosdemoAllTest/insert-newdb.json
+++ b/tests/pytest/tools/taosdemoAllTest/insert-newdb.json
@@ -18,18 +18,18 @@
 "name": "db",
 "drop": "yes",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
+ "comp":2,
+
+
+
 "update": 1
 },
 "super_tables": [{
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-newtable.json b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json
index 3115c9ba72692cd7c5d72de030cc7d9110f8c054..abe277bf5b2bf3f60aebd96f315cc67fb0c9caeb 100644
--- a/tests/pytest/tools/taosdemoAllTest/insert-newtable.json
+++ b/tests/pytest/tools/taosdemoAllTest/insert-newtable.json
@@ -18,18 +18,18 @@
 "name": "db",
 "drop": "no",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
+ "comp":2,
+
+
+
 "update": 1
 },
 "super_tables": [{
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json
index 7fdba4add14e8f91bfe516366b8c936c133f5546..2dae7eb1d727632dca9cfaa6905d33c9fde39487 100644
--- a/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json
+++ b/tests/pytest/tools/taosdemoAllTest/insert-nodbnodrop.json
@@ -18,18 +18,18 @@
 "name": "dbno",
 "drop": "no",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
+ "comp":2,
+
+
+
 "update": 1
 },
 "super_tables": [{
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-offset.json b/tests/pytest/tools/taosdemoAllTest/insert-offset.json
index 611b4a898975ec1a0b6f528e47961e0bccacd7af..642d01db3eb97a5611d5fe587d2e77929cb23e84 100644
--- a/tests/pytest/tools/taosdemoAllTest/insert-offset.json
+++ b/tests/pytest/tools/taosdemoAllTest/insert-offset.json
@@ -18,19 +18,19 @@
 "name": "db",
 "drop": "no",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json
index 72e380a66cb3cfd2b3bade57f000bbebbf29baf4..3ef4360aefbca9cb3cae8c04dfe2162075430bd9 100644
--- a/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json
+++ b/tests/pytest/tools/taosdemoAllTest/insert-renewdb.json
@@ -18,18 +18,18 @@
 "name": "db",
 "drop": "yes",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
+ "comp":2,
+
+
+
 "update": 1
 },
 "super_tables": [{
diff --git a/tests/pytest/tools/taosdemoAllTest/insert-sample.json b/tests/pytest/tools/taosdemoAllTest/insert-sample.json
index 015993227e60123581e4546b0544945f6962921c..5b25281e78361f7c27bd94d024a22afcaf870a77 100644
--- a/tests/pytest/tools/taosdemoAllTest/insert-sample.json
+++ b/tests/pytest/tools/taosdemoAllTest/insert-sample.json
@@ -18,19 +18,19 @@
 "name": "dbtest123",
 "drop": "yes",
 "replica": 1,
-
"days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insert-timestep.json b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json index 01d8ac90982b762a2c51edb55db9760f4c7e6f4f..6432fde4baf3d7c7810236bdf2f02e99906b6e02 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert-timestep.json +++ b/tests/pytest/tools/taosdemoAllTest/insert-timestep.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json index 4f31351516e927b4ec7638540c0aca70ed54c022..4e59d8667964a909cffe9dd7f4367d814e7a917a 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json +++ b/tests/pytest/tools/taosdemoAllTest/insertBinaryLenLarge16374AllcolLar49151.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json b/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json index 1634e1cf065c1979d6e62c97daa56ba2bb3fe1e9..80d6817b5d09851be7e31c864e968a5b729e063e 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertChildTab0.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json b/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json index f4e3ec8e9fad638910e644f624d6b4408163c340..a35c28f0acd00ed01b627d2d0619bc8183d97f06 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertChildTabLess0.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json index d9ac2072f1fb5f29f7b5e6540d20d04837e461c2..05d47c3611dd698d86d078805fac0785bd544479 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNum4096.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + 
"precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json index e5e31f75ef2e7ede4a8d1eb202c298c6952559e4..e63b3613ba6fa004f80b1eeefb39bb0011d51b27 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsAndTagNumLarge4096.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json b/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json index fd75f3b43ffa1e5f4c9cb7964ad218d15e0324fc..137e6083864580be49a0d02c5798f16f8046834a 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertColumnsNum0.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json index 197f8a208e85ca4ce57c06518a433ec3a3acbac3..63a4a2ab58a67363d0b69bbf7552c76fd5948699 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json +++ b/tests/pytest/tools/taosdemoAllTest/insertInterlaceRowsLarge1M.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json index 91234d5e48af891c4dfd0fdfd88121e123bf4edc..f3212bc30dcbdb2d8183e1c6050fe3b23ee92748 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json +++ b/tests/pytest/tools/taosdemoAllTest/insertMaxNumPerReq.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json index 813eb9af0428d8455bda3c1a17ffdd61337cc617..9711ead80ee17cb5f5b54c3439914262176c5633 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReq0.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 
100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json index 554115f3974b24746165e42e7309d9b4d3dd4a50..24c61cfa8cfbc810c573e3468730d33e2132eee7 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json +++ b/tests/pytest/tools/taosdemoAllTest/insertNumOfrecordPerReqless0.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertRestful.json b/tests/pytest/tools/taosdemoAllTest/insertRestful.json index d05e1c249f25c17c37e40626bf0d3c5a96e5fffe..ab7ee9a73b3414937f0843215d1d122448e1eedb 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertRestful.json +++ b/tests/pytest/tools/taosdemoAllTest/insertRestful.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json index f1aa981508f063adccd4cf2f5c6166a16deb9a23..d835822e8f81dd371558de1002ed68487ad0d5e7 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json +++ b/tests/pytest/tools/taosdemoAllTest/insertSigcolumnsNum4096.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json index 88218b4989d5e01178142aa9acf2332b34718826..4c7cdfe39d0ed2dd15abcae7ac6bf75b371e13bf 100644 --- a/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json +++ b/tests/pytest/tools/taosdemoAllTest/insertTagsNumLarge128.json @@ -18,19 +18,19 @@ "name": "db1", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json index 4637009ca36ef74dd445a166b5fedf782528d513..0f1a874cc364736a68962c1d293fc8cdc78cd8c8 100644 --- a/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json +++ b/tests/pytest/tools/taosdemoAllTest/insert_5M_rows.json @@ -17,19 +17,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - 
"update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json index a6ac674dd724db8647671114b8eb5290a0803044..bdab459987a587554c001c239c570afd3e7f8636 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block1_comp.json @@ -17,19 +17,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, + + "blocks": 3, "precision": "ms", "keep": 3650, "minRows": 1000, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/taosdemoAllTest/manual_block2.json b/tests/pytest/tools/taosdemoAllTest/manual_block2.json index 434159159b4dfe942af1c334fd9520d81214e6cb..763421c7f3bdb47509c354818e02b9a2b20ce5bd 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_block2.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_block2.json @@ -17,19 +17,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json index 7b8abd6d4e25991d38ff16c737bf8169c7311318..0579aedf69a74ad111b8f92808f7046bd0de24c8 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_A.json @@ -24,12 +24,12 @@ "keep": 10, "minRows": 1000, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json index aeee6322e5c0e6b58c0433be5f345e7c4f84f339..d541cb656778fc59fc1f3746fadcca0ced456e0a 100644 --- a/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json +++ b/tests/pytest/tools/taosdemoAllTest/manual_change_time_1_1_B.json @@ -24,12 +24,12 @@ "keep": 10, "minRows": 1000, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json index ad6cb8118da9f8f37041778e7ea6dfbcbc9f6b29..c134391a5f759e32a8e9752deab7205e8cb1aa49 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit1.json @@ -19,19 +19,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json index 7109dab53f78783c1d624210a85aec31fbcf1507..e9f759f8f7167c749bc3617545ee8c926248bf71 100644 --- 
a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit5.json @@ -19,19 +19,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json index a98a185b54464aedddd85d5ea4834d6107dd216b..9b46ff105b3217ee54ee6c0684136c7033995a05 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-limit94.json @@ -19,19 +19,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json index e2f3fb037969901cc25e474302cdeee9a08163c0..fdcaa131e6742f767a04ac52b7f9853b5757dcfb 100644 --- a/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json +++ b/tests/pytest/tools/taosdemoAllTest/moredemo-offset-newdb.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/query-interrupt.json b/tests/pytest/tools/taosdemoAllTest/query-interrupt.json index 643cbf09c83f7191620dee32787caa9f5754ad18..01028f68ad9a6f3aa870d0c1b1e38562e896abe4 100644 --- a/tests/pytest/tools/taosdemoAllTest/query-interrupt.json +++ b/tests/pytest/tools/taosdemoAllTest/query-interrupt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json index 99138e36668971ee2e9aa0656b2ee76f262723e3..0fc789c7e30f1d0f74d4e10df635df738b4411be 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/queryInsertdata.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json index 747f7b3c7e9ebb5720cae98811e136ece74d47e2..940adfb61c6fc294f7b286514c2808269e8c9e66 100644 --- a/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json +++ 
b/tests/pytest/tools/taosdemoAllTest/queryInsertrestdata.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json index b3e1024647ff14d0a4a47759e0c9aceab0ac5240..b2805a38e51d86e80838efb753c0f10c94b2c5b4 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tnt1r-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json index 26d483f57da2c30c7ab5d466f6b0b2cb3e5450b0..ac540befb637b0105a4f718228db11dc3f51ca01 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-1s1tntmr-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json index b1cd882bbf38545d1a3e7d4999fc4f6e0d5c4025..9a7ad93636f6578d0adb7553c2d912f38614301d 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-disorder-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json index e541d663fc9f884a7206592271d5124da7746793..919b91839530c0bd5db3338d73698eed19aefda7 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-N00-stmt.json @@ -17,19 +17,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json index f32d44240d7f5b717013878358e5d4db378ba354..dcf52931ad40788edd1f7f16f3e7cdd190792b16 100644 --- 
a/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-drop-exist-auto-Y00-stmt.json
@@ -17,19 +17,19 @@
 "name": "db",
 "drop": "yes",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 3650,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb",
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json
index c9d93c2423612b3fb4c6ab1f2b5d577f3c64e8cd..d2304ed537d5c18e81c2d93803947396ecb2ed5a 100644
--- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interlace-row-stmt.json
@@ -18,19 +18,19 @@
 "name": "db",
 "drop": "yes",
 "replica": 1,
- "days": 10,
+ "cache": 50,
- "blocks": 8,
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json
index 7f94fa2e75b930489dc0106d1796df06af43967f..d297240613de0e51dcab3e0582fd041858010eda 100644
--- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-interval-speed-stmt.json
@@ -18,19 +18,19 @@
 "name": "db",
 "drop": "yes",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
- "update": 0
+ "comp":2
+
+
+
+
+
 },
 "super_tables": [{
 "name": "stb0",
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json
index 339a2555c87f01b8ec6ce84f018dd4787f39d7fd..d117c5b3450e31a8736eea97d36d9d172c74e314 100644
--- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newdb-stmt.json
@@ -18,18 +18,18 @@
 "name": "db",
 "drop": "yes",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
+ "comp":2,
+
+
+
 "update": 1
 },
 "super_tables": [{
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json
index 7e39ddbc0d6233c23d3eb9d5f34e9f0cc6a64360..1b36b3cbe9cc520a625645bf1e1e5b89a6be2a11 100644
--- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json
+++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-newtable-stmt.json
@@ -18,18 +18,18 @@
 "name": "db",
 "drop": "no",
 "replica": 1,
- "days": 10,
- "cache": 16,
- "blocks": 8,
+
+
+ "precision": "ms",
 "keep": 365,
 "minRows": 100,
 "maxRows": 4096,
- "comp":2,
- "walLevel":1,
- "cachelast":0,
- "quorum":1,
- "fsync":3000,
+ "comp":2,
+
+
+
 "update": 1
 },
 "super_tables": [{
diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json
index
e83a04003324149803f040e61fa6750a20b2afbb..ea95736a00fba7630f8479699397f455b51db45c 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-nodbnodrop-stmt.json @@ -18,18 +18,18 @@ "name": "dbno", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, + "comp":2 + + + + "update": 1 }, "super_tables": [{ diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json index 9502358de0e1eb92730dd6782d21bcaba4f67af5..8318de6672bbcb8c705648f593baf647d3b3f571 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-offset-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "no", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json index 5a500a12580e2fbe9aca206f962304f3310adb3f..b6cb47f2c5f086fd50794fab7b84188ee1162bcf 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-renewdb-stmt.json @@ -18,18 +18,18 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, + "comp":2 + + + + "update": 1 }, "super_tables": [{ diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json index c3f11bf03dad7b7bbc25e2af16488bbd0719bf02..348e93ff8b5b0a1666d22cc017f376a1da120702 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-sample-stmt.json @@ -18,19 +18,19 @@ "name": "dbtest123", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json index d2143366d7c3928495d5a4ef6f83edb5014670f4..edbaae60a14aa8c289d9f3854f654f3da27f37da 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insert-timestep-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json 
index c6909c6278cdbc6fd85eea04fb7e4e859f6df5cd..1c72b4f402d67070b9b25d6ff8c83923148e1c92 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertBinaryLenLarge16374AllcolLar49151-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json index a5cc009ffb4a5f769d63b8fc4ad1d74f04a76c4b..4626babd9519bd702373dc321a801075df655903 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTab0-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json index d9678a58692af75e06c77451028151658f812a77..f140883de168d77ad83253532fecfee81c9dd7c9 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertChildTabLess0-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json index a448750f74b5ad7219c5f29d744729777f497053..d1d2db2df388a63b7587932cfc0b980f67cce62f 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsAndTagNum4096-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json index 4ec18c49d6c4614f55947d5ab3b9d9a9a84579af..d79d4cace533578d4cf2d55430bef55dd64485c8 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertColumnsNum0-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git 
a/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json index c9dad3dc7f95a7b95682621103c945dff395d3b5..eb0ab0f04ac8f602d83eb5271ae7f5eab86f7d10 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertInterlaceRowsLarge1M-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json index 00c346678f884a06a0611116ad13e47117bad59f..489632c645e732eb9c0fe2fa358947b1e6ba585e 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertMaxNumPerReq-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 3650, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json index 4e47b3b404847a267f47413f6ab297e35cc84b0b..19eb92bf4c8541eca4d6d3306d5e5772998ca719 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReq0-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json index 28e7bbb39bb5d2477842129936ed6584e617e25a..dbda4f74a1d209c5a112028e21fdec27ff390a14 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertNumOfrecordPerReqless0-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json index 39e38afefd7060b6c6a0241521029e84816b999b..966c285d2f7fc197dd8af6a7b8ea9c0caf58aa45 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertSigcolumnsNum4096-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, 
"maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json index f219d3c7a57146a075599eff495ffe93533373ef..c1fc02553fe501e7c09769d947b3f21acc96555e 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/insertTagsNumLarge128-stmt.json @@ -18,19 +18,19 @@ "name": "db1", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json b/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json index 2105398d55b80f14f2fcfcd08f752333e27c031c..1d7ad8a90eb95a86f109214f516d9484b11a53da 100644 --- a/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json +++ b/tests/pytest/tools/taosdemoAllTest/stmt/nsertColumnsAndTagNumLarge4096-stmt.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json index 1f9d794990dcbc0daaee2076f2ae6dfd1249b132..1ca302a320897f7fc04dbbef9aa8a2fea2808724 100644 --- a/tests/pytest/tools/taosdemoAllTest/subInsertdata.json +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdata.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json index d5d0578f07526c18d541391597a3236c99f27544..ef6354627880bf3fde91567e5de3ee518fccb995 100644 --- a/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json +++ b/tests/pytest/tools/taosdemoAllTest/subInsertdataMaxsql100.json @@ -18,19 +18,19 @@ "name": "db", "drop": "yes", "replica": 1, - "days": 10, - "cache": 16, - "blocks": 8, + + + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json index 49ab6f3a4367b4cebd840bb24b43a5d190c0d464..b6e5847b54897814fb9c6e7b1c7f9cb4ed8d29f3 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertMSDB.json @@ -18,19 +18,19 @@ "name": "testdb3", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 36, "minRows": 100, 
"maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json index 9a35df917dcbb2600852e8172da0be3ffacb0d15..ed97fea33e106aff8d2821a10191bd360a629a6b 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertNanoDB.json @@ -18,19 +18,19 @@ "name": "testdb1", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 36, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json index 631179dbaebfff29de6b38831b78fede989369d4..db34bfc6b8a617b1a57ed687562bb09ade6c24c8 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoInsertUSDB.json @@ -18,19 +18,19 @@ "name": "testdb2", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "us", "keep": 36, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json index 246f1c35f29973fc20602284b37ae68de23f70c1..d029ddea219aca3ce79a19035e6ae1bead016795 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabase.json @@ -18,19 +18,19 @@ "name": "nsdb", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 36, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json index 0726f3905de2b254b49be51a7973d34b5eb6757e..f8a181d352fad7702cf97aaca9aea3aa1801cab1 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseInsertForSub.json @@ -18,19 +18,19 @@ "name": "subnsdb", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 36, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json index f36b1f9b4c1b83707b9482428d4303a5418ad2c3..b06ec55ef6157e46a435c0a10ef0144f7e648334 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabaseNow.json @@ -18,19 +18,19 @@ "name": "nsdb2", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 36, "minRows": 
100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json index 867619ed8c1497e76077f96d257dd09a489d9eb7..6a6a6da2979869690298978676641d3279cd69b0 100644 --- a/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json +++ b/tests/pytest/tools/taosdemoAllTest/taosdemoTestNanoDatabasecsv.json @@ -18,19 +18,19 @@ "name": "nsdbcsv", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ns", "keep": 36, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tsdb/insertDataDb1.json b/tests/pytest/tsdb/insertDataDb1.json index 60c6def92c9461e2af8e9c0cefc5e574ca61a465..92735dad69790f51cc35878f4c81dc7d81a64b72 100644 --- a/tests/pytest/tsdb/insertDataDb1.json +++ b/tests/pytest/tsdb/insertDataDb1.json @@ -18,19 +18,19 @@ "name": "db1", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tsdb/insertDataDb1Replica2.json b/tests/pytest/tsdb/insertDataDb1Replica2.json index fec38bcdecd9b441ad1c31891e66e7245c43889f..a5fc525157c9d22084f137b9057b4ebe7d2e7c5f 100644 --- a/tests/pytest/tsdb/insertDataDb1Replica2.json +++ b/tests/pytest/tsdb/insertDataDb1Replica2.json @@ -18,19 +18,19 @@ "name": "db1", "drop": "yes", "replica": 2, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tsdb/insertDataDb2.json b/tests/pytest/tsdb/insertDataDb2.json index ead5f19716af8071b49e728ba91c523df9dd5139..02301e024271509642d4aa4c8fa5f19e2b39c939 100644 --- a/tests/pytest/tsdb/insertDataDb2.json +++ b/tests/pytest/tsdb/insertDataDb2.json @@ -18,19 +18,19 @@ "name": "db2", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tsdb/insertDataDb2Newstab.json b/tests/pytest/tsdb/insertDataDb2Newstab.json index f9d0713385265282e938838a10b485ca9cfdd603..2f5f2367b4445f58f67155381536f520a3422a7a 100644 --- a/tests/pytest/tsdb/insertDataDb2Newstab.json +++ b/tests/pytest/tsdb/insertDataDb2Newstab.json @@ -18,19 +18,19 @@ "name": "db2", "drop": "no", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json b/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json index e052f2850fc2fe1e15c651f6150b79fa65c531c1..67f3b2cd4f2cdb08fe8337a8372e35c0b6a2e02b 100644 --- 
a/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json +++ b/tests/pytest/tsdb/insertDataDb2NewstabReplica2.json @@ -18,19 +18,19 @@ "name": "db2", "drop": "no", "replica": 2, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/tsdb/insertDataDb2Replica2.json b/tests/pytest/tsdb/insertDataDb2Replica2.json index 121f70956a8f1eff31f92bc7fb904835f6bcd0de..3d033f13cc77ac9ecdf0803cf8d014c3b5a9a882 100644 --- a/tests/pytest/tsdb/insertDataDb2Replica2.json +++ b/tests/pytest/tsdb/insertDataDb2Replica2.json @@ -18,19 +18,19 @@ "name": "db2", "drop": "yes", "replica": 2, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py index 6384195a1b89deba2c93876572ea11fd093e500c..33ef92bf735a5211044ebd37c3c8300abd8843a8 100644 --- a/tests/pytest/util/common.py +++ b/tests/pytest/util/common.py @@ -78,8 +78,8 @@ class DataSet: self.float_data.append( float(i * float_step % FLOAT_MAX )) self.double_data.append( float(i * double_step % DOUBLE_MAX )) self.bool_data.append( bool((i + bool_start) % 2 )) - self.vchar_data.append( f"{vchar_prefix}_{i * vchar_step}" ) - self.nchar_data.append( f"{nchar_prefix}_{i * nchar_step}") + self.vchar_data.append( f"{vchar_prefix}{i * vchar_step}" ) + self.nchar_data.append( f"{nchar_prefix}{i * nchar_step}") self.ts_data.append( int(datetime.timestamp(datetime.now()) * 1000 - i * ts_step)) def get_disorder_set(self, rows, **kwargs): diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index b9177d22699d8ef4b36e09a66b6c5716c2e9d7a5..4c50af6bac28e8d0d6d315189a1204725f7a0744 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -194,6 +194,15 @@ class TDSql: args = (caller.filename, caller.lineno, self.sql, self.queryRows, expectRows) tdLog.exit("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + def checkRows_range(self, expect_row_list): + # pass if the actual row count matches any of the accepted values; otherwise exit the case + if self.queryRows in expect_row_list: + tdLog.info(f"sql:{self.sql}, queryRows:{self.queryRows} in expect:{expect_row_list}") + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + tdLog.exit(f"{caller.filename}({caller.lineno}) failed: sql:{self.sql}, queryRows:{self.queryRows} not in expect:{expect_row_list}") + def checkCols(self, expectCols): if self.queryCols == expectCols: tdLog.info("sql:%s, queryCols:%d == expect:%d" % (self.sql, self.queryCols, expectCols)) diff --git a/tests/pytest/util/taosadapter.py b/tests/pytest/util/taosadapter.py index 1a198240d73175d4796286fec2b22687bf034a4c..614eb95d6b4871df8b6f23b9140fae008883d7a5 100644 --- a/tests/pytest/util/taosadapter.py +++ b/tests/pytest/util/taosadapter.py @@ -111,7 +111,7 @@ class TAdapter: self.taosadapter_cfg_dict[key] = value def check_adapter(self): - if getPath(tool="taosadapter"): + if get_path(tool="taosadapter"): return False else: return True diff --git a/tests/pytest/wal/insertDataDb1.json b/tests/pytest/wal/insertDataDb1.json index 1dce00a4d55aae732ae9c85033f49398a0b1a9be..a14fe581412f9497b4c16b94213685f31e06aa0c 100644 --- a/tests/pytest/wal/insertDataDb1.json +++ b/tests/pytest/wal/insertDataDb1.json @@ 
-18,19 +18,19 @@ "name": "db1", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/wal/insertDataDb1Replica2.json b/tests/pytest/wal/insertDataDb1Replica2.json index fec38bcdecd9b441ad1c31891e66e7245c43889f..a5fc525157c9d22084f137b9057b4ebe7d2e7c5f 100644 --- a/tests/pytest/wal/insertDataDb1Replica2.json +++ b/tests/pytest/wal/insertDataDb1Replica2.json @@ -18,19 +18,19 @@ "name": "db1", "drop": "yes", "replica": 2, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/wal/insertDataDb2.json b/tests/pytest/wal/insertDataDb2.json index 2cf8af580570ac66049ca2248a916337517a6507..891a21f73e195996d7bb5d8539b22b88164efa0c 100644 --- a/tests/pytest/wal/insertDataDb2.json +++ b/tests/pytest/wal/insertDataDb2.json @@ -18,19 +18,19 @@ "name": "db2", "drop": "yes", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/wal/insertDataDb2Newstab.json b/tests/pytest/wal/insertDataDb2Newstab.json index f9d0713385265282e938838a10b485ca9cfdd603..2f5f2367b4445f58f67155381536f520a3422a7a 100644 --- a/tests/pytest/wal/insertDataDb2Newstab.json +++ b/tests/pytest/wal/insertDataDb2Newstab.json @@ -18,19 +18,19 @@ "name": "db2", "drop": "no", "replica": 1, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/wal/insertDataDb2NewstabReplica2.json b/tests/pytest/wal/insertDataDb2NewstabReplica2.json index e052f2850fc2fe1e15c651f6150b79fa65c531c1..67f3b2cd4f2cdb08fe8337a8372e35c0b6a2e02b 100644 --- a/tests/pytest/wal/insertDataDb2NewstabReplica2.json +++ b/tests/pytest/wal/insertDataDb2NewstabReplica2.json @@ -18,19 +18,19 @@ "name": "db2", "drop": "no", "replica": 2, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/pytest/wal/insertDataDb2Replica2.json b/tests/pytest/wal/insertDataDb2Replica2.json index 121f70956a8f1eff31f92bc7fb904835f6bcd0de..3d033f13cc77ac9ecdf0803cf8d014c3b5a9a882 100644 --- a/tests/pytest/wal/insertDataDb2Replica2.json +++ b/tests/pytest/wal/insertDataDb2Replica2.json @@ -18,19 +18,19 @@ "name": "db2", "drop": "yes", "replica": 2, - "days": 10, + "cache": 50, - "blocks": 8, + "precision": "ms", "keep": 365, "minRows": 100, "maxRows": 4096, - "comp":2, - "walLevel":1, - "cachelast":0, - "quorum":1, - "fsync":3000, - "update": 0 + "comp":2 + + + + + }, "super_tables": [{ "name": "stb0", diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 
d745ecf706a09e0ff04d9b8a23588c10cbf81062..910b99ace3d098923bbe539b3f19eb35d8b5f6eb 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -27,7 +27,7 @@ ./test.sh -f tsim/db/delete_writing2.sim # unsupport ./test.sh -f tsim/db/dropdnodes.sim ./test.sh -f tsim/db/error1.sim -# TD-17592 ./test.sh -f tsim/db/keep.sim +./test.sh -f tsim/db/keep.sim ./test.sh -f tsim/db/len.sim ./test.sh -f tsim/db/repeat.sim ./test.sh -f tsim/db/show_create_db.sim @@ -89,7 +89,7 @@ ./test.sh -f tsim/parser/alter_column.sim ./test.sh -f tsim/parser/alter_stable.sim ./test.sh -f tsim/parser/alter.sim -# TD-17959 ./test.sh -f tsim/parser/alter1.sim +./test.sh -f tsim/parser/alter1.sim ./test.sh -f tsim/parser/auto_create_tb_drop_tb.sim ./test.sh -f tsim/parser/auto_create_tb.sim ./test.sh -f tsim/parser/between_and.sim @@ -131,9 +131,9 @@ ./test.sh -f tsim/parser/insert_tb.sim # TD-17038 ./test.sh -f tsim/parser/interp.sim ./test.sh -f tsim/parser/join_manyblocks.sim -# TD-17713 ./test.sh -f tsim/parser/join_multitables.sim -# TD-17713 ./test.sh -f tsim/parser/join_multivnode.sim -# TD-17707 ./test.sh -f tsim/parser/join.sim +# TD-18018 ./test.sh -f tsim/parser/join_multitables.sim +./test.sh -f tsim/parser/join_multivnode.sim +./test.sh -f tsim/parser/join.sim ./test.sh -f tsim/parser/last_cache.sim ./test.sh -f tsim/parser/last_groupby.sim ./test.sh -f tsim/parser/lastrow.sim @@ -144,7 +144,7 @@ # TD-17623 ./test.sh -f tsim/parser/limit2.sim ./test.sh -f tsim/parser/mixed_blocks.sim ./test.sh -f tsim/parser/nchar.sim -# TD-17703 ./test.sh -f tsim/parser/nestquery.sim +./test.sh -f tsim/parser/nestquery.sim ./test.sh -f tsim/parser/null_char.sim ./test.sh -f tsim/parser/precision_ns.sim ./test.sh -f tsim/parser/projection_limit_offset.sim @@ -179,7 +179,6 @@ ./test.sh -f tsim/query/scalarFunction.sim ./test.sh -f tsim/query/scalarNull.sim ./test.sh -f tsim/query/session.sim -./test.sh -f tsim/query/udf.sim # ---- qnode ./test.sh -f tsim/qnode/basic1.sim @@ -316,6 +315,7 @@ ./test.sh -f tsim/valgrind/checkError5.sim ./test.sh -f tsim/valgrind/checkError6.sim ./test.sh -f tsim/valgrind/checkError7.sim +./test.sh -f tsim/valgrind/checkUdf.sim # --- vnode # unsupport ./test.sh -f tsim/vnode/replica3_basic.sim @@ -339,7 +339,7 @@ # --- catalog ---- ./test.sh -f tsim/catalog/alterInCurrent.sim -# --- scalar +# --- scalar ---- ./test.sh -f tsim/scalar/in.sim ./test.sh -f tsim/scalar/scalar.sim @@ -384,7 +384,7 @@ ./test.sh -f tsim/compute/sum.sim ./test.sh -f tsim/compute/top.sim -# ---- field +# ---- field ---- ./test.sh -f tsim/field/2.sim ./test.sh -f tsim/field/3.sim ./test.sh -f tsim/field/4.sim @@ -401,7 +401,7 @@ ./test.sh -f tsim/field/tinyint.sim ./test.sh -f tsim/field/unsigined_bigint.sim -# ---- vector +# ---- vector ---- ./test.sh -f tsim/vector/metrics_field.sim ./test.sh -f tsim/vector/metrics_mix.sim ./test.sh -f tsim/vector/metrics_query.sim @@ -414,14 +414,14 @@ ./test.sh -f tsim/vector/table_query.sim ./test.sh -f tsim/vector/table_time.sim -# ---- wal +# ---- wal ---- ./test.sh -f tsim/wal/kill.sim -# ---- tag +# ---- tag ---- ./test.sh -f tsim/tag/3.sim ./test.sh -f tsim/tag/4.sim ./test.sh -f tsim/tag/5.sim -# TD-17382 ./test.sh -f tsim/tag/6.sim +./test.sh -f tsim/tag/6.sim ./test.sh -f tsim/tag/add.sim ./test.sh -f tsim/tag/bigint.sim ./test.sh -f tsim/tag/binary_binary.sim @@ -429,18 +429,18 @@ ./test.sh -f tsim/tag/bool_binary.sim ./test.sh -f tsim/tag/bool_int.sim ./test.sh -f tsim/tag/bool.sim -# TD-17407 ./test.sh -f tsim/tag/change.sim +./test.sh -f 
tsim/tag/change.sim ./test.sh -f tsim/tag/column.sim ./test.sh -f tsim/tag/commit.sim -# TD-17407 ./test.sh -f tsim/tag/create.sim -# TD-17407 ./test.sh -f tsim/tag/delete.sim -# TD-17407 ./test.sh -f tsim/tag/double.sim +./test.sh -f tsim/tag/create.sim +./test.sh -f tsim/tag/delete.sim +./test.sh -f tsim/tag/double.sim ./test.sh -f tsim/tag/filter.sim -# TD-17407 ./test.sh -f tsim/tag/float.sim +./test.sh -f tsim/tag/float.sim ./test.sh -f tsim/tag/int_binary.sim ./test.sh -f tsim/tag/int_float.sim ./test.sh -f tsim/tag/int.sim -# TD-17959 ./test.sh -f tsim/tag/set.sim +./test.sh -f tsim/tag/set.sim ./test.sh -f tsim/tag/smallint.sim ./test.sh -f tsim/tag/tinyint.sim diff --git a/tests/script/tsim/db/keep.sim b/tests/script/tsim/db/keep.sim index d8939eafbc6558f31a1930d407638f735604a3b2..dc92492a482471c72d81db9175b96361d51a81e6 100644 --- a/tests/script/tsim/db/keep.sim +++ b/tests/script/tsim/db/keep.sim @@ -63,12 +63,10 @@ print ===> rows $rows last $data01 if $rows >= 80 then return -1 endi -if $rows <= 50 then +if $rows <= 45 then return -1 endi -return - print ======== step5 stop dnode system sh/exec.sh -n dnode2 -s stop -x SIGKILL system sh/exec.sh -n dnode2 -s start @@ -78,7 +76,7 @@ print ===> rows $rows last $data01 if $rows >= 80 then return -1 endi -if $rows <= 50 then +if $rows <= 45 then return -1 endi @@ -109,8 +107,8 @@ print ======== step8 insert data $x = 81 while $x < 121 $time = $x . d - sql insert into tb values (now - $time , $x ) -x step4 - step4: + sql insert into tb values (now - $time , $x ) -x step8 + step8: $x = $x + 1 endw diff --git a/tests/script/tsim/insert/basic2.sim b/tests/script/tsim/insert/basic2.sim index 0bd64b1d029bee127fdea53e3ef688055353ef65..eca46697f5bfdca223d922583f98f85838fe25c9 100644 --- a/tests/script/tsim/insert/basic2.sim +++ b/tests/script/tsim/insert/basic2.sim @@ -26,8 +26,8 @@ endi print =============== step3-1 insert records into ct1 sql insert into ct1 values('2022-05-03 16:59:00.010', 10, 20, 'n','n',30); -sql insert into ct1 values('2022-05-03 16:59:00.011', 'N', 'n', 'N',"N",30); -sql insert into ct1 values('2022-05-03 16:59:00.012', 'Nu', 'nul', 'Nul','NUL',30); +sql insert into ct1 values('2022-05-03 16:59:00.011', 'null', 'null', 'N',"N",30); +sql insert into ct1 values('2022-05-03 16:59:00.012', 'null', 'null', 'Nul','NUL',30); sql insert into ct1 values('2022-05-03 16:59:00.013', NULL, 'null', 'Null',null,30); sql insert into ct1 values('2022-05-03 16:59:00.014', NULL, 'NuLL', 'Null',NULL,30); diff --git a/tests/script/tsim/parser/condition_query.sim b/tests/script/tsim/parser/condition_query.sim index 87ea9f3cbeac5f49cbe58e8f89b65dffcc4f6fc0..d9bfcb80742230fe52cac7656dbc91735350a63d 100644 --- a/tests/script/tsim/parser/condition_query.sim +++ b/tests/script/tsim/parser/condition_query.sim @@ -37,7 +37,7 @@ sql select * from stb1 where c8 < 'nuLl'; sql select * from stb1 where c9 > 'nuLl'; sql_error select * from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b; sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 or b.c1 < 60; -sql_error select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60)); +sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60)); sql select * from stb1 where 'c2' is null; 
sql select * from stb1 where 'c2' is not null; @@ -1475,66 +1475,66 @@ if $data20 != @21-05-05 18:19:25.000@ then return -1 endi -#sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts; -#sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50; -#if $rows != 4 then -# return -1 -#endi -#if $data00 != @21-05-05 18:19:20.000@ then -# return -1 -#endi -#if $data10 != @21-05-05 18:19:21.000@ then -# return -1 -#endi -#if $data20 != @21-05-05 18:19:24.000@ then -# return -1 -#endi -#if $data30 != @21-05-05 18:19:25.000@ then -# return -1 -#endi +sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts; +sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:24.000@ then + return -1 +endi +if $data30 != @21-05-05 18:19:25.000@ then + return -1 +endi -#sql select a.ts,a.c1,a.c8,a.c9 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts; -#sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts; -#if $rows != 2 then -# return -1 -#endi -#if $data00 != @21-05-05 18:19:20.000@ then -# return -1 -#endi -#if $data10 != @21-05-05 18:19:21.000@ then -# return -1 -#endi -# -#sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts; -#if $rows != 4 then -# return -1 -#endi -#if $data00 != @21-05-05 18:19:00.000@ then -# return -1 -#endi -#if $data10 != @21-05-05 18:19:02.000@ then -# return -1 -#endi -#if $data20 != @21-05-05 18:19:12.000@ then -# return -1 -#endi -#if $data30 != @21-05-05 18:19:14.000@ then -# return -1 -#endi -# -#sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts; -#if $rows != 3 then -# return -1 -#endi -#if $data00 != @21-05-05 18:19:04.000@ then -# return -1 -#endi -#if $data10 != @21-05-05 18:19:06.000@ then -# return -1 -#endi -#if $data20 != @21-05-05 18:19:10.000@ then -# return -1 -#endi +sql select a.ts,a.c1,a.c8,a.c9 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts; +sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 and b.c1 < 60 order by ts; +if $rows != 2 then + return -1 +endi +if $data00 != @21-05-05 18:19:20.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:21.000@ then + return -1 +endi + +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts; +if $rows != 4 then + return -1 +endi +if $data00 != @21-05-05 18:19:00.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:02.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:12.000@ then + return -1 +endi 
+if $data30 != @21-05-05 18:19:14.000@ then + return -1 +endi + +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts; +if $rows != 3 then + return -1 +endi +if $data00 != @21-05-05 18:19:04.000@ then + return -1 +endi +if $data10 != @21-05-05 18:19:06.000@ then + return -1 +endi +if $data20 != @21-05-05 18:19:10.000@ then + return -1 +endi sql select * from stb1 where c1 is null and c1 is not null; if $rows != 0 then @@ -2574,7 +2574,7 @@ sql select * from stb1 where c1 > 0 or t1 > 0 and c1 > 1 sql select * from stb1 where t1 > 0 or t1 > 0 and c1 > 1 sql select * from stb1 where (c1 > 0 and t1 > 0 ) or (t1 > 1 and c1 > 3) sql select * from stb1 where (c1 > 0 and t1 > 0 ) or t1 > 1 -sql_error select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.t1=b.t1; +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.t1=b.t1; sql select * from stb1 where c1 < 63 and t1 > 5 if $rows != 2 then @@ -2631,7 +2631,7 @@ sql_error select tb1.ts,tb1.c1,tb2_1.u1 from tb1, tb2_1 where tb1.ts=tb2_1.ts or print "ts&tbname test" -sql_error select count(*) from stb1 where ts > 0 or tbname like 'tb%'; +sql select count(*) from stb1 where ts > 0 or tbname like 'tb%'; print "ts&tag test" sql select count(*) from stb1 where ts > 0 or t1 > 0; @@ -2717,9 +2717,9 @@ print "tbname&tag&join test" print "column&ts&tbname&tag test" -sql_error select * from stb1 where (tbname like 'tb%' or ts > '2021-05-05 18:19:01.000') and (t1 > 5 or t1 < 4) and c1 > 0; +sql select * from stb1 where (tbname like 'tb%' or ts > '2021-05-05 18:19:01.000') and (t1 > 5 or t1 < 4) and c1 > 0; sql select * from stb1 where (ts > '2021-05-05 18:19:01.000') and (ts > '2021-05-05 18:19:02.000' or t1 > 3) and (t1 > 5 or t1 < 4) and c1 > 0; -sql_error select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:20.000' and col > 0 and t1 > 0; +sql select ts,c1,c7 from stb1 where ts > '2021-05-05 18:19:03.000' or ts > '2021-05-05 18:19:20.000' and c1 > 0 and t1 > 0; print "column&ts&tbname&join test" diff --git a/tests/script/tsim/parser/first_last_query.sim b/tests/script/tsim/parser/first_last_query.sim index adad554fb2a7833000dd84e4cf325f64caa32bc8..92c9fd14d64107021d8b215d5236f22c272da2b8 100644 --- a/tests/script/tsim/parser/first_last_query.sim +++ b/tests/script/tsim/parser/first_last_query.sim @@ -283,12 +283,14 @@ print ================== server restart completed sql connect sql use first_db0; -sql select last(*), tbname from m1 group by tbname; +sql select last(*), tbname from m1 group by tbname order by tbname; +print $data00 $data01 $data02 $data10 $data11 $data12 if $rows != 2 then return -1 endi if $data00 != @20-03-01 01:01:01.000@ then + print data00 $data00 != 20-03-01 01:01:01.000@ return -1 endi @@ -312,4 +314,4 @@ if $data12 != @tm1@ then return -1 endi -sql drop table m1 \ No newline at end of file +sql drop table m1 diff --git a/tests/script/tsim/parser/join.sim b/tests/script/tsim/parser/join.sim index 0f41ebd1780d8922a1b2111830645f21514eff74..8ad5946a5452520e6d9ef42fba904c1e4aac2160 100644 --- a/tests/script/tsim/parser/join.sim +++ b/tests/script/tsim/parser/join.sim @@ -320,7 +320,7 @@ sql_error select count(join_tb1.c3), last(join_tb1.c3) from $tb1 , $tb2 where jo sql_error select count(join_tb3.*) from $tb1 , $tb2 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 
100002 and join_tb0.c7 = true; sql_error select first(join_tb1.*) from $tb1 , $tb2 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 or join_tb0.c7 = true; sql_error select join_tb3.* from $tb1 , $tb2 where join_tb1.ts = join_tb0.ts and join_tb1.ts <= 100002 and join_tb0.c7 = true; -sql_error select join_tb1.* from $tb1 , $tb2 where join_tb1.ts = join_tb0.ts and join_tb1.ts = join_tb0.c1; +sql select join_tb1.* from $tb1 , $tb2 where join_tb1.ts = join_tb0.ts and join_tb1.ts = join_tb0.c1; sql_error select join_tb1.* from $tb1 , $tb2 where join_tb1.ts = join_tb0.c1; sql_error select join_tb1.* from $tb1 , $tb2 where join_tb1.c7 = join_tb0.c1; sql_error select join_tb1.* from $tb1 , $tb2 where join_tb1.ts > join_tb0.ts; @@ -407,32 +407,32 @@ if $data00 != 396.000000000 then endi # first/last -sql select count(join_mt0.c1), sum(join_mt1.c2), first(join_mt0.c5), last(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts and join_mt0.t1=1 interval(10a) order by join_mt0.ts asc; +sql select count(join_mt0.c1), sum(join_mt1.c2), first(join_mt0.c5), last(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts and join_mt0.t1=1 interval(10a) order by _wstart asc; $val = 100 if $rows != $val then print $rows return -1 endi - +print $data00 $data01 $data02 $val = 10 -if $data01 != $val then +if $data00 != $val then return -1 endi $val = 45.000000000 -print $data02 -if $data02 != $val then +print $data01 +if $data01 != $val then return -1 endi $val = 0 -if $data03 != 0 then +if $data02 != 0 then return -1 endi # order by first/last -sql select count(join_mt0.c1), sum(join_mt1.c2), first(join_mt0.c5), last(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts and join_mt0.t1=1 interval(10a) order by join_mt0.ts desc; +sql select count(join_mt0.c1), sum(join_mt1.c2), first(join_mt0.c5), last(join_mt1.c7) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1 and join_mt0.ts=join_mt1.ts and join_mt0.t1=1 interval(10a) order by _wstart desc; $val = 100 if $rows != $val then @@ -453,13 +453,13 @@ print =================>"group by not supported" #======================limit offset=================================== # tag values not int -sql_error select count(*) from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t2=join_mt1.t2; +sql select count(*) from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t2=join_mt1.t2; # tag type not identical -sql_error select count(*) from join_mt0, join_mt1 where join_mt1.t2 = join_mt0.t1 and join_mt1.ts=join_mt0.ts; +sql select count(*) from join_mt0, join_mt1 where join_mt1.t2 = join_mt0.t1 and join_mt1.ts=join_mt0.ts; # table/super table join -sql_error select count(join_mt0.c1) from join_mt0, join_tb1 where join_mt0.ts=join_tb1.ts +sql select count(join_mt0.c1) from join_mt0, join_tb1 where join_mt0.ts=join_tb1.ts # multi-condition @@ -471,7 +471,7 @@ sql_error select count(join_mt0.c1), count(join_mt0.c2) from join_mt0, join_mt0 sql_error select sum(join_mt1.c2) from join_mt0, join_mt1 where join_mt0.t1=join_mt1.t1; # missing tag equals -sql_error select count(join_mt1.c3) from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts; +sql select count(join_mt1.c3) from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts; # tag values are identical error sql create table m1(ts timestamp, k int) tags(a int); @@ -486,7 +486,7 @@ sql insert into tm2 using m1 tags(1) values(1000000, 1)(2000000, 2); sql insert into um1 using m2 
tags(1) values(1000001, 10)(2000000, 20); sql insert into um2 using m2 tags(9) values(1000001, 10)(2000000, 20); -sql_error select count(*) from m1,m2 where m1.a=m2.a and m1.ts=m2.ts; +sql select count(*) from m1,m2 where m1.a=m2.a and m1.ts=m2.ts; print ====> empty table/empty super-table join test, add for no result join test sql create database ux1; diff --git a/tests/script/tsim/parser/join_multitables.sim b/tests/script/tsim/parser/join_multitables.sim index 7a1c77acffc61abcba1524ed9800d3af55978bee..6c5138f8cbbb0f358686f0beb3eb7b582f5496eb 100644 --- a/tests/script/tsim/parser/join_multitables.sim +++ b/tests/script/tsim/parser/join_multitables.sim @@ -4,9 +4,7 @@ system sh/exec.sh -n dnode1 -s start sql connect print ======================== dnode1 start - $db = testdb - sql create database $db sql use $db @@ -200,7 +198,6 @@ sql insert into tb3_5 values('2021-03-03 05:00:00.000', 9935,9935.0,'35'); sql insert into tb3_5 values('2021-03-04 05:00:00.000', 9935,9935.0,'35'); sql insert into tb3_5 values('2021-03-05 05:00:00.000', 9935,9935.0,'35'); - sql insert into tb4_1 values('2021-03-01 01:00:00.000', 9941,9941.0,'41'); sql insert into tb4_1 values('2021-03-02 01:00:00.000', 9941,9941.0,'41'); sql insert into tb4_1 values('2021-03-03 01:00:00.000', 9941,9941.0,'41'); @@ -409,13 +406,10 @@ sql insert into tbb_5 values('2021-03-03 05:00:00.000', 99115,99115.0,'b5'); sql insert into tbb_5 values('2021-03-04 05:00:00.000', 99115,99115.0,'b5'); sql insert into tbb_5 values('2021-03-05 05:00:00.000', 99115,99115.0,'b5'); - sql select * from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1; - if $rows != 25 then return -1 endi - if $data00 != @21-03-01 01:00:00.000@ then return -1 endi @@ -448,248 +442,10 @@ if $data09 != @21-03-01 01:00:00.000@ then return -1 endi -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != 9901 then - return -1 -endi -if $data12 != 9901.000000000 then - return -1 -endi -if $data13 != 01 then - return -1 -endi -if $data14 != 0 then - return -1 -endi -if $data15 != 1 then - return -1 -endi -if $data16 != 2.000000000 then - return -1 -endi -if $data17 != 1 then - return -1 -endi -if $data18 != 3 then - return -1 -endi -if $data19 != @21-03-02 01:00:00.000@ then - return -1 -endi - - - -if $data20 != @21-03-03 01:00:00.000@ then - return -1 -endi -if $data21 != 9901 then - return -1 -endi -if $data22 != 9901.000000000 then - return -1 -endi -if $data23 != 01 then - return -1 -endi -if $data24 != 0 then - return -1 -endi -if $data25 != 1 then - return -1 -endi -if $data26 != 2.000000000 then - return -1 -endi -if $data27 != 1 then - return -1 -endi -if $data28 != 3 then - return -1 -endi -if $data29 != @21-03-03 01:00:00.000@ then - return -1 -endi - - -if $data30 != @21-03-04 01:00:00.000@ then - return -1 -endi -if $data31 != 9901 then - return -1 -endi -if $data32 != 9901.000000000 then - return -1 -endi -if $data33 != 01 then - return -1 -endi -if $data34 != 0 then - return -1 -endi -if $data35 != 1 then - return -1 -endi -if $data36 != 2.000000000 then - return -1 -endi -if $data37 != 1 then - return -1 -endi -if $data38 != 3 then - return -1 -endi -if $data39 != @21-03-04 01:00:00.000@ then - return -1 -endi - - - - sql select * from st0, st1 where st0.ts=st1.ts and st0.id2=st1.id2; - -if $rows != 25 then - return -1 -endi - -if $data00 != @21-03-01 01:00:00.000@ then - return -1 -endi -if $data01 != 9901 then - return -1 -endi -if $data02 != 9901.000000000 then - return -1 -endi -if $data03 != 01 then - return -1 -endi -if $data04 != 0 
then - return -1 -endi -if $data05 != 1 then - return -1 -endi -if $data06 != 2.000000000 then - return -1 -endi -if $data07 != 1 then - print $data07 - return -1 -endi -if $data08 != 3 then - return -1 -endi -if $data09 != @21-03-01 01:00:00.000@ then - return -1 -endi - -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != 9901 then - return -1 -endi -if $data12 != 9901.000000000 then - return -1 -endi -if $data13 != 01 then - return -1 -endi -if $data14 != 0 then - return -1 -endi -if $data15 != 1 then - return -1 -endi -if $data16 != 2.000000000 then - return -1 -endi -if $data17 != 1 then - return -1 -endi -if $data18 != 3 then - return -1 -endi -if $data19 != @21-03-02 01:00:00.000@ then - return -1 -endi - - - -if $data20 != @21-03-03 01:00:00.000@ then - return -1 -endi -if $data21 != 9901 then - return -1 -endi -if $data22 != 9901.000000000 then - return -1 -endi -if $data23 != 01 then - return -1 -endi -if $data24 != 0 then - return -1 -endi -if $data25 != 1 then - return -1 -endi -if $data26 != 2.000000000 then - return -1 -endi -if $data27 != 1 then - return -1 -endi -if $data28 != 3 then - return -1 -endi -if $data29 != @21-03-03 01:00:00.000@ then - return -1 -endi - - -if $data30 != @21-03-04 01:00:00.000@ then - return -1 -endi -if $data31 != 9901 then - return -1 -endi -if $data32 != 9901.000000000 then - return -1 -endi -if $data33 != 01 then - return -1 -endi -if $data34 != 0 then - return -1 -endi -if $data35 != 1 then - return -1 -endi -if $data36 != 2.000000000 then - return -1 -endi -if $data37 != 1 then - return -1 -endi -if $data38 != 3 then - return -1 -endi -if $data39 != @21-03-04 01:00:00.000@ then - return -1 -endi - - - - -sql select * from st0, st1 where st0.id3=st1.id3 and st1.ts=st0.ts; - if $rows != 25 then return -1 endi - if $data00 != @21-03-01 01:00:00.000@ then return -1 endi @@ -715,642 +471,99 @@ if $data07 != 1 then print $data07 return -1 endi -if $data08 != 3 then - return -1 -endi -if $data09 != @21-03-01 01:00:00.000@ then - return -1 -endi - -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != 9901 then - return -1 -endi -if $data12 != 9901.000000000 then - return -1 -endi -if $data13 != 01 then - return -1 -endi -if $data14 != 0 then - return -1 -endi -if $data15 != 1 then - return -1 -endi -if $data16 != 2.000000000 then - return -1 -endi -if $data17 != 1 then - return -1 -endi -if $data18 != 3 then - return -1 -endi -if $data19 != @21-03-02 01:00:00.000@ then - return -1 -endi - - - -if $data20 != @21-03-03 01:00:00.000@ then - return -1 -endi -if $data21 != 9901 then - return -1 -endi -if $data22 != 9901.000000000 then - return -1 -endi -if $data23 != 01 then - return -1 -endi -if $data24 != 0 then - return -1 -endi -if $data25 != 1 then - return -1 -endi -if $data26 != 2.000000000 then - return -1 -endi -if $data27 != 1 then - return -1 -endi -if $data28 != 3 then - return -1 -endi -if $data29 != @21-03-03 01:00:00.000@ then - return -1 -endi - - -if $data30 != @21-03-04 01:00:00.000@ then - return -1 -endi -if $data31 != 9901 then - return -1 -endi -if $data32 != 9901.000000000 then - return -1 -endi -if $data33 != 01 then - return -1 -endi -if $data34 != 0 then - return -1 -endi -if $data35 != 1 then - return -1 -endi -if $data36 != 2.000000000 then - return -1 -endi -if $data37 != 1 then - return -1 -endi -if $data38 != 3 then - return -1 -endi -if $data39 != @21-03-04 01:00:00.000@ then - return -1 -endi - - - - - -sql select * from st0, st1 where st1.id5=st0.id5 and st0.ts=st1.ts; - -if 
$rows != 25 then - return -1 -endi - -if $data00 != @21-03-01 01:00:00.000@ then - return -1 -endi -if $data01 != 9901 then - return -1 -endi -if $data02 != 9901.000000000 then - return -1 -endi -if $data03 != 01 then - return -1 -endi -if $data04 != 0 then - return -1 -endi -if $data05 != 1 then - return -1 -endi -if $data06 != 2.000000000 then - return -1 -endi -if $data07 != 1 then - print $data07 - return -1 -endi -if $data08 != 3 then - return -1 -endi -if $data09 != @21-03-01 01:00:00.000@ then - return -1 -endi - -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != 9901 then - return -1 -endi -if $data12 != 9901.000000000 then - return -1 -endi -if $data13 != 01 then - return -1 -endi -if $data14 != 0 then - return -1 -endi -if $data15 != 1 then - return -1 -endi -if $data16 != 2.000000000 then - return -1 -endi -if $data17 != 1 then - return -1 -endi -if $data18 != 3 then - return -1 -endi -if $data19 != @21-03-02 01:00:00.000@ then - return -1 -endi - - - -if $data20 != @21-03-03 01:00:00.000@ then - return -1 -endi -if $data21 != 9901 then - return -1 -endi -if $data22 != 9901.000000000 then - return -1 -endi -if $data23 != 01 then - return -1 -endi -if $data24 != 0 then - return -1 -endi -if $data25 != 1 then - return -1 -endi -if $data26 != 2.000000000 then - return -1 -endi -if $data27 != 1 then - return -1 -endi -if $data28 != 3 then - return -1 -endi -if $data29 != @21-03-03 01:00:00.000@ then - return -1 -endi - - -if $data30 != @21-03-04 01:00:00.000@ then - return -1 -endi -if $data31 != 9901 then - return -1 -endi -if $data32 != 9901.000000000 then - return -1 -endi -if $data33 != 01 then - return -1 -endi -if $data34 != 0 then - return -1 -endi -if $data35 != 1 then - return -1 -endi -if $data36 != 2.000000000 then - return -1 -endi -if $data37 != 1 then - return -1 -endi -if $data38 != 3 then - return -1 -endi -if $data39 != @21-03-04 01:00:00.000@ then - return -1 -endi - - - - -sql select st0.* from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1; - -if $rows != 25 then - return -1 -endi - -if $data00 != @21-03-01 01:00:00.000@ then - return -1 -endi -if $data01 != 9901 then - return -1 -endi -if $data02 != 9901.000000000 then - return -1 -endi -if $data03 != 01 then - return -1 -endi -if $data04 != 0 then - return -1 -endi -if $data05 != 1 then - return -1 -endi -if $data06 != 2.000000000 then - return -1 -endi -if $data07 != 1 then - print $data07 - return -1 -endi -if $data08 != 3 then - return -1 -endi - -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != 9901 then - return -1 -endi -if $data12 != 9901.000000000 then - return -1 -endi -if $data13 != 01 then - return -1 -endi -if $data14 != 0 then - return -1 -endi -if $data15 != 1 then - return -1 -endi -if $data16 != 2.000000000 then - return -1 -endi -if $data17 != 1 then - return -1 -endi -if $data18 != 3 then - return -1 -endi - - - -if $data20 != @21-03-03 01:00:00.000@ then - return -1 -endi -if $data21 != 9901 then - return -1 -endi -if $data22 != 9901.000000000 then - return -1 -endi -if $data23 != 01 then - return -1 -endi -if $data24 != 0 then - return -1 -endi -if $data25 != 1 then - return -1 -endi -if $data26 != 2.000000000 then - return -1 -endi -if $data27 != 1 then - return -1 -endi -if $data28 != 3 then - return -1 -endi - - -if $data30 != @21-03-04 01:00:00.000@ then - return -1 -endi -if $data31 != 9901 then - return -1 -endi -if $data32 != 9901.000000000 then - return -1 -endi -if $data33 != 01 then - return -1 -endi -if $data34 != 0 then - 
return -1 -endi -if $data35 != 1 then - return -1 -endi -if $data36 != 2.000000000 then - return -1 -endi -if $data37 != 1 then - return -1 -endi -if $data38 != 3 then - return -1 -endi - - - - -sql select st0.* from st0, st1 where st0.ts=st1.ts and st1.id2=st0.id2; - -if $rows != 25 then - return -1 -endi - -if $data00 != @21-03-01 01:00:00.000@ then - return -1 -endi -if $data01 != 9901 then - return -1 -endi -if $data02 != 9901.000000000 then - return -1 -endi -if $data03 != 01 then - return -1 -endi -if $data04 != 0 then - return -1 -endi -if $data05 != 1 then - return -1 -endi -if $data06 != 2.000000000 then - return -1 -endi -if $data07 != 1 then - print $data07 - return -1 -endi -if $data08 != 3 then - return -1 -endi - -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != 9901 then - return -1 -endi -if $data12 != 9901.000000000 then - return -1 -endi -if $data13 != 01 then - return -1 -endi -if $data14 != 0 then - return -1 -endi -if $data15 != 1 then - return -1 -endi -if $data16 != 2.000000000 then - return -1 -endi -if $data17 != 1 then - return -1 -endi -if $data18 != 3 then - return -1 -endi - - - -if $data20 != @21-03-03 01:00:00.000@ then - return -1 -endi -if $data21 != 9901 then - return -1 -endi -if $data22 != 9901.000000000 then - return -1 -endi -if $data23 != 01 then - return -1 -endi -if $data24 != 0 then - return -1 -endi -if $data25 != 1 then - return -1 -endi -if $data26 != 2.000000000 then - return -1 -endi -if $data27 != 1 then - return -1 -endi -if $data28 != 3 then - return -1 -endi - - -if $data30 != @21-03-04 01:00:00.000@ then - return -1 -endi -if $data31 != 9901 then - return -1 -endi -if $data32 != 9901.000000000 then - return -1 -endi -if $data33 != 01 then - return -1 -endi -if $data34 != 0 then - return -1 -endi -if $data35 != 1 then - return -1 -endi -if $data36 != 2.000000000 then - return -1 -endi -if $data37 != 1 then - return -1 -endi -if $data38 != 3 then - return -1 -endi - - - - - -sql select st0.* from st0, st1 where st0.id3=st1.id3 and st1.ts=st0.ts; - -if $rows != 25 then - return -1 -endi - -if $data00 != @21-03-01 01:00:00.000@ then - return -1 -endi -if $data01 != 9901 then - return -1 -endi -if $data02 != 9901.000000000 then - return -1 -endi -if $data03 != 01 then - return -1 -endi -if $data04 != 0 then - return -1 -endi -if $data05 != 1 then - return -1 -endi -if $data06 != 2.000000000 then - return -1 -endi -if $data07 != 1 then - print $data07 - return -1 -endi -if $data08 != 3 then - return -1 -endi - -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != 9901 then - return -1 -endi -if $data12 != 9901.000000000 then - return -1 -endi -if $data13 != 01 then +if $data08 != 3 then return -1 endi -if $data14 != 0 then +if $data09 != @21-03-01 01:00:00.000@ then return -1 endi -if $data15 != 1 then + +sql select * from st0, st1 where st0.id3=st1.id3 and st1.ts=st0.ts; +if $rows != 25 then return -1 endi -if $data16 != 2.000000000 then +if $data00 != @21-03-01 01:00:00.000@ then return -1 endi -if $data17 != 1 then +if $data01 != 9901 then return -1 endi -if $data18 != 3 then +if $data02 != 9901.000000000 then return -1 endi - - - -if $data20 != @21-03-03 01:00:00.000@ then +if $data03 != 01 then return -1 endi -if $data21 != 9901 then +if $data04 != 0 then return -1 endi -if $data22 != 9901.000000000 then +if $data05 != 1 then return -1 endi -if $data23 != 01 then +if $data06 != 2.000000000 then return -1 endi -if $data24 != 0 then +if $data07 != 1 then + print $data07 return -1 endi -if 
$data25 != 1 then +if $data08 != 3 then return -1 endi -if $data26 != 2.000000000 then +if $data09 != @21-03-01 01:00:00.000@ then return -1 endi -if $data27 != 1 then + +sql select * from st0, st1 where st1.id5=st0.id5 and st0.ts=st1.ts; +if $rows != 25 then return -1 endi -if $data28 != 3 then +if $data00 != @21-03-01 01:00:00.000@ then return -1 endi - - -if $data30 != @21-03-04 01:00:00.000@ then +if $data01 != 9901 then return -1 endi -if $data31 != 9901 then +if $data02 != 9901.000000000 then return -1 endi -if $data32 != 9901.000000000 then +if $data03 != 01 then return -1 endi -if $data33 != 01 then +if $data04 != 0 then return -1 endi -if $data34 != 0 then +if $data05 != 1 then return -1 endi -if $data35 != 1 then +if $data06 != 2.000000000 then return -1 endi -if $data36 != 2.000000000 then +if $data07 != 1 then + print $data07 return -1 endi -if $data37 != 1 then +if $data08 != 3 then return -1 endi -if $data38 != 3 then +if $data09 != @21-03-01 01:00:00.000@ then return -1 endi - - - -sql select st1.* from st0, st1 where st1.id5=st0.id5 and st0.ts=st1.ts; - +sql select st0.* from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1; if $rows != 25 then return -1 endi - if $data00 != @21-03-01 01:00:00.000@ then return -1 endi -if $data01 != 9911 then +if $data01 != 9901 then return -1 endi -if $data02 != 9911.000000000 then +if $data02 != 9901.000000000 then return -1 endi -if $data03 != 11 then +if $data03 != 01 then return -1 endi if $data04 != 0 then @@ -1370,100 +583,109 @@ if $data08 != 3 then return -1 endi -if $data10 != @21-03-02 01:00:00.000@ then +sql select st0.* from st0, st1 where st0.ts=st1.ts and st1.id2=st0.id2; +if $rows != 25 then return -1 endi -if $data11 != 9911 then +if $data00 != @21-03-01 01:00:00.000@ then return -1 endi -if $data12 != 9911.000000000 then +if $data01 != 9901 then return -1 endi -if $data13 != 11 then +if $data02 != 9901.000000000 then return -1 endi -if $data14 != 0 then +if $data03 != 01 then return -1 endi -if $data15 != 1 then +if $data04 != 0 then return -1 endi -if $data16 != 2.000000000 then +if $data05 != 1 then return -1 endi -if $data17 != 1 then +if $data06 != 2.000000000 then return -1 endi -if $data18 != 3 then +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then return -1 endi - - -if $data20 != @21-03-03 01:00:00.000@ then +sql select st0.* from st0, st1 where st0.id3=st1.id3 and st1.ts=st0.ts; +if $rows != 25 then return -1 endi -if $data21 != 9911 then +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then return -1 endi -if $data22 != 9911.000000000 then +if $data02 != 9901.000000000 then return -1 endi -if $data23 != 11 then +if $data03 != 01 then return -1 endi -if $data24 != 0 then +if $data04 != 0 then return -1 endi -if $data25 != 1 then +if $data05 != 1 then return -1 endi -if $data26 != 2.000000000 then +if $data06 != 2.000000000 then return -1 endi -if $data27 != 1 then +if $data07 != 1 then + print $data07 return -1 endi -if $data28 != 3 then +if $data08 != 3 then return -1 endi - -if $data30 != @21-03-04 01:00:00.000@ then +sql select st1.* from st0, st1 where st1.id5=st0.id5 and st0.ts=st1.ts; +if $rows != 25 then return -1 endi -if $data31 != 9911 then +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9911 then return -1 endi -if $data32 != 9911.000000000 then +if $data02 != 9911.000000000 then return -1 endi -if $data33 != 11 then +if $data03 != 11 then return -1 endi -if $data34 != 0 then +if $data04 != 0 then return -1 endi 
-if $data35 != 1 then +if $data05 != 1 then return -1 endi -if $data36 != 2.000000000 then +if $data06 != 2.000000000 then return -1 endi -if $data37 != 1 then +if $data07 != 1 then + print $data07 return -1 endi -if $data38 != 3 then +if $data08 != 3 then return -1 endi - sql select st0.f1,st1.f1 from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1; - if $rows != 25 then return -1 endi - if $data00 != 9901 then return -1 endi @@ -1489,16 +711,10 @@ if $data31 != 9911 then return -1 endi - - - sql select st0.ts,st1.ts from st0, st1 where st0.ts=st1.ts and st1.id2=st0.id2; - - if $rows != 25 then return -1 endi - if $data00 != @21-03-01 01:00:00.000@ then return -1 endi @@ -1524,15 +740,10 @@ if $data31 != @21-03-04 01:00:00.000@ then return -1 endi - - sql select st1.ts,st0.ts,st0.id3,st1.id3,st0.f3,st1.f3 from st0, st1 where st0.id3=st1.id3 and st1.ts=st0.ts; - - if $rows != 25 then return -1 endi - if $data00 != @21-03-01 01:00:00.000@ then return -1 endi @@ -1552,27 +763,6 @@ if $data05 != 11 then return -1 endi -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data02 != 2.000000000 then - return -1 -endi -if $data03 != 2.000000000 then - return -1 -endi -if $data04 != 01 then - return -1 -endi -if $data05 != 11 then - return -1 -endi - - - sql select st0.ts,st0.f2,st1.f3,st1.f2,st0.f3 from st0, st1 where st1.id5=st0.id5 and st0.ts=st1.ts; if $rows != 25 then return -1 @@ -1608,9 +798,6 @@ if $data14 != 01 then return -1 endi - - - sql select last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval(10a); if $rows != 25 then return -1 @@ -1642,35 +829,6 @@ endi if $data08 != 11 then return -1 endi -if $data10 != @21-03-01 02:00:00.000@ then - return -1 -endi -if $data11 != @21-03-01 02:00:00.000@ then - return -1 -endi -if $data12 != 9902 then - return -1 -endi -if $data13 != 9902.000000000 then - return -1 -endi -if $data14 != 02 then - return -1 -endi -if $data15 != @21-03-01 02:00:00.000@ then - return -1 -endi -if $data16 != 9912 then - return -1 -endi -if $data17 != 9912.000000000 then - return -1 -endi -if $data18 != 12 then - return -1 -endi - - sql select last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval(1d) sliding(1d); if $rows != 5 then @@ -1703,35 +861,6 @@ endi if $data08 != 15 then return -1 endi -if $data10 != @21-03-02 00:00:00.000@ then - return -1 -endi -if $data11 != @21-03-02 05:00:00.000@ then - return -1 -endi -if $data12 != 9905 then - return -1 -endi -if $data13 != 9905.000000000 then - return -1 -endi -if $data14 != 05 then - return -1 -endi -if $data15 != @21-03-02 05:00:00.000@ then - return -1 -endi -if $data16 != 9915 then - return -1 -endi -if $data17 != 9915.000000000 then - return -1 -endi -if $data18 != 15 then - return -1 -endi - - sql select st0.*,st1.* from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and st1.ts=st0.ts and st0.id1=st1.id1; if $rows != 25 then @@ -1769,11 +898,7 @@ if $data09 != @21-03-01 01:00:00.000@ then endi - - - sql select st0.ts,* from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 order by st0.ts; - if $rows != 25 then return -1 endi @@ -1844,8 +969,6 @@ if $data09 != @21-03-01 01:00:00.000@ then return -1 endi - - sql select top(st1.f1, 5) from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and st1.ts=st0.ts and st0.id1=st1.id1; if $rows != 5 then return -1 @@ -1881,10 +1004,6 @@ if $data41 != 9915 then return -1 endi - - - - sql select top(st0.f1,5) from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and 
st1.ts=st0.ts and st0.id1=st1.id1; if $rows != 5 then return -1 @@ -1920,7 +1039,6 @@ if $data41 != 9905 then return -1 endi - #sql select st0.*,st1.*,st2.*,st3.* from st3,st2,st1,st0 where st0.id1=st3.id1 and st3.ts=st2.ts and st2.id1=st1.id1 and st1.ts=st0.ts; #sql select st0.*,st1.*,st2.*,st3.* from st3,st2,st1,st0 where st0.id1=st3.id1 and st3.ts=st2.ts and st2.id1=st1.id1 and st1.ts=st0.ts and st0.id1=st2.id1 and st1.ts=st2.ts; #if $rows != 25 then @@ -1958,8 +1076,6 @@ endi # return -1 #endi - - #sql select st0.*,st1.*,st2.*,st3.* from st3,st2,st1,st0 where st0.id1=st1.id1 and st1.ts=st0.ts and st2.id1=st3.id1 and st3.ts=st2.ts; #sql select st0.*,st1.*,st2.*,st3.* from st3,st2,st1,st0 where st0.id1=st1.id1 and st1.ts=st0.ts and st2.id1=st3.id1 and st3.ts=st2.ts and st0.id1=st2.id1 and st0.ts=st2.ts; #if $rows != 25 then @@ -1996,8 +1112,6 @@ endi # return -1 #endi - - sql select st0.*,st1.*,st2.*,st3.*,st4.*,st5.*,st6.*,st7.*,st8.*,st9.* from st0,st1,st2,st3,st4,st5,st6,st7,st8,st9 where st0.ts=st2.ts and st0.ts=st4.ts and st0.ts=st6.ts and st0.ts=st8.ts and st1.ts=st3.ts and st3.ts=st5.ts and st5.ts=st7.ts and st7.ts=st9.ts and st0.ts=st1.ts and st0.id1=st2.id1 and st0.id1=st4.id1 and st0.id1=st6.id1 and st0.id1=st8.id1 and st1.id1=st3.id1 and st3.id1=st5.id1 and st5.id1=st7.id1 and st7.id1=st9.id1 and st0.id1=st1.id1; if $rows != 25 then return -1 @@ -2033,8 +1147,6 @@ if $data09 != @21-03-01 01:00:00.000@ then return -1 endi - - sql select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.ts=tb1_1.ts; if $rows != 5 then return -1 @@ -2088,8 +1200,6 @@ if $data17 != 11 then return -1 endi - - sql select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.ts=tb1_1.ts and tb0_1.id1=tb1_1.id1; if $rows != 5 then return -1 @@ -2143,20 +1253,15 @@ if $data17 != 11 then return -1 endi - sql select tb0_1.*, tb1_2.*,tb2_3.*,tb3_4.*,tb4_5.* from tb0_1, tb1_2, tb2_3, tb3_4, tb4_5 where tb0_1.ts=tb1_2.ts and tb0_1.ts=tb2_3.ts and tb0_1.ts=tb3_4.ts and tb0_1.ts=tb4_5.ts; if $rows != 0 then return -1 endi - - sql select tb0_1.*, tb1_1.*,tb2_1.*,tb3_1.*,tb4_1.* from tb0_1, tb1_1, tb2_1, tb3_1, tb4_1 where tb0_1.ts=tb1_1.ts and tb0_1.ts=tb2_1.ts and tb0_1.ts=tb3_1.ts and tb0_1.ts=tb4_1.ts; - if $rows != 5 then return -1 endi - if $data00 != @21-03-01 01:00:00.000@ then return -1 endi @@ -2186,46 +1291,12 @@ if $data08 != @21-03-01 01:00:00.000@ then endi if $data09 != 9921 then return -1 -endi -if $data10 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data11 != 9901 then - return -1 -endi -if $data12 != 9901.000000000 then - return -1 -endi -if $data13 != 01 then - return -1 -endi -if $data14 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data15 != 9911 then - return -1 -endi -if $data16 != 9911.000000000 then - return -1 -endi -if $data17 != 11 then - return -1 -endi -if $data18 != @21-03-02 01:00:00.000@ then - return -1 -endi -if $data19 != 9921 then - return -1 -endi - - +endi sql select tb0_5.*, tb1_5.*,tb2_5.*,tb3_5.*,tb4_5.*,tb5_5.*, tb6_5.*,tb7_5.*,tb8_5.*,tb9_5.* from tb0_5, tb1_5, tb2_5, tb3_5, tb4_5,tb5_5, tb6_5, tb7_5, tb8_5, tb9_5 where tb9_5.ts=tb8_5.ts and tb8_5.ts=tb7_5.ts and tb7_5.ts=tb6_5.ts and tb6_5.ts=tb5_5.ts and tb5_5.ts=tb4_5.ts and tb4_5.ts=tb3_5.ts and tb3_5.ts=tb2_5.ts and tb2_5.ts=tb1_5.ts and tb1_5.ts=tb0_5.ts; - if $rows != 5 then return -1 endi - if $data00 != @21-03-01 05:00:00.000@ then return -1 endi @@ -2255,37 +1326,7 @@ if $data08 != @21-03-01 05:00:00.000@ then endi if $data09 != 9925 then return -1 -endi -if $data10 != @21-03-02 05:00:00.000@ then - 
return -1
-endi
-if $data11 != 9905 then
- return -1
-endi
-if $data12 != 9905.000000000 then
- return -1
-endi
-if $data13 != 05 then
- return -1
-endi
-if $data14 != @21-03-02 05:00:00.000@ then
- return -1
-endi
-if $data15 != 9915 then
- return -1
-endi
-if $data16 != 9915.000000000 then
- return -1
-endi
-if $data17 != 15 then
- return -1
-endi
-if $data18 != @21-03-02 05:00:00.000@ then
- return -1
-endi
-if $data19 != 9925 then
- return -1
-endi
+endi
 
 sql_error select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.f1=tb1_1.f1;
 sql_error select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.ts=tb1_1.ts and tb0_1.id1=tb1_1.id2;
diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim
index 44f5eb74e78e71be54f813c9032cf6b0b369cd11..fcf91c45da500589c5cd0abdf98072feca1b72ac 100644
--- a/tests/script/tsim/query/explain.sim
+++ b/tests/script/tsim/query/explain.sim
@@ -45,6 +45,7 @@ print ======== step3
 sql explain verbose true select * from st1 where -2;
 sql explain verbose true select ts from tb1 where f1 > 0;
 sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00';
+sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1;
 sql explain verbose true select * from information_schema.user_stables where db_name='db2';
 
 print ======== step4
diff --git a/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim b/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
index ec03aaf9db076b45538b8dc7d2071b4d6b9d0922..241781eed103e6c4cbf9971b18513ca79f213b09 100644
--- a/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
+++ b/tests/script/tsim/sync/vnodesnapshot-rsma-test.sim
@@ -47,7 +47,7 @@ endi
 
 $replica = 3
 $vgroups = 1
-$retentions = 5s:7d,15s:21d
+$retentions = 5s:7d,15s:21d,1m:365d
 
 print ============= create database
 sql create database db replica $replica vgroups $vgroups retentions $retentions
@@ -114,7 +114,7 @@ endi
 vg_ready:
 
 print ====> create stable/child table
-sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum)
+sql create table stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int) rollup(sum) watermark 3s,3s max_delay 3s,3s
 
 sql show stables
 if $rows != 1 then
@@ -129,20 +129,28 @@ system sh/exec.sh -n dnode4 -s stop -x SIGINT
 
 sleep 3000
 
-print ===> write 100 records
-$N = 100
-$count = 0
-while $count < $N
-  $ms = 1659000000000 + $count
-  sql insert into ct1 values( $ms , $count , 2.1, 3.1)
-  $count = $count + 1
-endw
+print ===> write 0-50 records
+$ms = 0
+$cnt = 0
+while $cnt < 50
+  $ms = $cnt . m
+  sql insert into ct1 values (now + $ms , $cnt , 2.1, 3.1)
+  $cnt = $cnt + 1
+ endw
 
+print ===> flush database db
+sql flush database db;
+sleep 5000
 
+print ===> write 51-100 records
+while $cnt < 100
+  $ms = $cnt . m
+  sql insert into ct1 values (now + $ms , $cnt , 2.1, 3.1)
+  $cnt = $cnt + 1
+ endw
 
-#sql flush database db;
-
-
-sleep 3000
+print ===> flush database db
+sql flush database db;
+sleep 5000
 
 print ===> stop dnode1 dnode2 dnode3
@@ -150,8 +158,6 @@ system sh/exec.sh -n dnode1 -s stop -x SIGINT
 system sh/exec.sh -n dnode2 -s stop -x SIGINT
 system sh/exec.sh -n dnode3 -s stop -x SIGINT
 
-sleep 10000
-
 ########################################################
 print ===> start dnode1 dnode2 dnode3 dnode4
 system sh/exec.sh -n dnode1 -s start
@@ -164,7 +170,7 @@ sleep 3000
 print =============== query data
 sql connect
 sql use db
-sql select * from ct1
+sql select * from ct1 where ts > now - 1d
 print rows: $rows
 print $data00 $data01 $data02
 if $rows != 100 then
diff --git a/tests/script/tsim/tag/add.sim b/tests/script/tsim/tag/add.sim
index a22d337eff9921e0323efa085bfece58e1826ff2..c08ae0ff4cb937b3d42ed5bf6eb0c47b1bfba824 100644
--- a/tests/script/tsim/tag/add.sim
+++ b/tests/script/tsim/tag/add.sim
@@ -831,16 +831,10 @@ sql alter table $mt add tag tgcol4 int
 sql alter table $mt add tag tgcol5 bigint
 sql alter table $mt add tag tgcol6 bigint
 
-return
-sql alter table $mt add tag tgcol7 bigint -x step141
- return -1
-step141:
-sql reset query cache
+ sql reset query cache
 sql alter table $mt drop tag tgcol6
 sql alter table $mt add tag tgcol7 bigint
-sql alter table $mt add tag tgcol8 bigint -x step142
- return -1
-step142:
+sql alter table $mt add tag tgcol8 bigint
 
 print =============== clear
 sql drop database $db
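A note on the vnodesnapshot-rsma-test.sim hunks above: the `retentions` option now declares three tiers (`5s:7d,15s:21d,1m:365d`), and the rollup super table correspondingly sets `watermark 3s,3s max_delay 3s,3s`, which reads as one value per rollup level beyond the base tier. A minimal sketch of that pairing in the same sim dialect, with illustrative names (`d_demo` and `demo_stb` are assumptions, not part of the patch):

# sketch only: retention tiers paired with a matching rollup super table
# sql create database d_demo vgroups 1 retentions 5s:7d,15s:21d,1m:365d
# sql create table d_demo.demo_stb (ts timestamp, c1 int) tags (t1 int) rollup(sum) watermark 3s,3s max_delay 3s,3s
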
diff --git a/tests/script/tsim/tag/create.sim b/tests/script/tsim/tag/create.sim
index e25f1eb71f9818404e43ab63c76d7829f18bb49b..da683389cb5b30f3f0d8ff520fb5b92ecddf63d8 100644
--- a/tests/script/tsim/tag/create.sim
+++ b/tests/script/tsim/tag/create.sim
@@ -179,7 +179,7 @@ $tb = $tbPrefix . $i
 sql create table $mt (ts timestamp, tbcol int) TAGS(tgcol bool, tgcol2 bool)
 sql create table $tb using $mt tags( 1, 2 )
 sql insert into $tb values(now, 1)
-sql select * from $mt where tgcol2 = 2
+sql select * from $mt where tgcol2 = 1
 if $rows != 1 then
   print expect 1, actual: $rows
   return -1
@@ -453,7 +453,7 @@ endi
 if $data01 != 1 then
   return -1
 endi
-sql select * from $mt where tgcol2 = 2
+sql select * from $mt where tgcol2 = 1
 if $rows != 1 then
   return -1
 endi
diff --git a/tests/script/tsim/tag/set.sim b/tests/script/tsim/tag/set.sim
index 2c14313a07d53a6098305160804e4d97fddadbe4..5bd3463e3ad995a1ce6a92878867b06852ec6d5d 100644
--- a/tests/script/tsim/tag/set.sim
+++ b/tests/script/tsim/tag/set.sim
@@ -452,4 +452,4 @@ if $rows != 2 then
   return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/valgrind/basic1.sim b/tests/script/tsim/valgrind/basic1.sim
index a259dd9219f3b636709b6b0265305318e4f6fc0b..2d70284c8bb67975296c9dbccd18c354051b11a4 100644
--- a/tests/script/tsim/valgrind/basic1.sim
+++ b/tests/script/tsim/valgrind/basic1.sim
@@ -45,17 +45,7 @@ while $i < $tbNum
 endw
 
 print =============== step3: tb
-sql explain analyze select ts from stb where -2;
-sql explain analyze select ts from tb1;
-sql explain analyze select ts from stb order by ts;
-sql explain analyze select * from information_schema.user_stables;
-sql explain analyze select count(*),sum(tbcol) from tb1;
-sql explain analyze select count(*),sum(tbcol) from stb;
-sql explain analyze select count(*),sum(tbcol) from stb group by tbcol;
-sql explain analyze select * from information_schema.user_stables;
-sql explain analyze verbose true select * from information_schema.user_stables where db_name='db2';
-sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select _wstart, count(*) from tb1 session(ts,1m)
 
 _OVER:
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/valgrind/basic2.sim b/tests/script/tsim/valgrind/basic2.sim
index 7c905209ee2967b458a7036c46ea7a1014b029ec..522070f48711964ce21d870b574f14f12306668b 100644
--- a/tests/script/tsim/valgrind/basic2.sim
+++ b/tests/script/tsim/valgrind/basic2.sim
@@ -1,7 +1,6 @@
 system sh/stop_dnodes.sh
 system sh/deploy.sh -n dnode1 -i 1
-system sh/cfg.sh -n dnode1 -c debugflag -v 131
-system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode1 -s start -v
 sql connect
 
 print =============== step1: create drop show dnodes
@@ -42,53 +41,16 @@ while $i < $tbNum
     sql insert into $tb values ($ms , $x , $x , $x )
     $x = $x + 1
   endw
-
-  $cc = $x * 60000
-  $ms = 1601481600000 + $cc
-  sql insert into $tb values ($ms , NULL , NULL , NULL )
   $i = $i + 1
 endw
 
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
-system sh/exec.sh -n dnode1 -s start -v
-
 print =============== step3: tb
-sql select avg(tbcol) from tb1
-sql select avg(tbcol) from tb1 where ts <= 1601481840000
-sql select avg(tbcol) as b from tb1
-sql select avg(tbcol) as b from tb1 interval(1d)
-sql select avg(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m)
-sql select bottom(tbcol, 2) from tb1 where ts <= 1601481840000
-sql select top(tbcol, 2) from tb1 where ts <= 1601481840000
-sql select percentile(tbcol, 2) from tb1 where ts <= 1601481840000
-sql select leastsquares(tbcol, 1, 1) as b from tb1 where ts <= 1601481840000
-sql show table distributed tb1
-sql select count(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m)
-sql select diff(tbcol) from tb1 where ts <= 1601481840000
-sql select diff(tbcol) from tb1 where tbcol > 5 and tbcol < 20
-sql select first(tbcol), last(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m)
-#sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql select last_row(*) from tb1 where tbcol > 5 and tbcol < 20
-
-print =============== step4: stb
-sql select avg(tbcol) as c from stb
-sql select avg(tbcol) as c from stb where ts <= 1601481840000
-sql select avg(tbcol) as c from stb where tgcol < 5 and ts <= 1601481840000
-sql select avg(tbcol) as c from stb interval(1m)
-sql select avg(tbcol) as c from stb interval(1d)
-sql select avg(tbcol) as b from stb where ts <= 1601481840000 interval(1m)
-sql select avg(tbcol) as c from stb group by tgcol
-sql select avg(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
-sql show table distributed stb
-sql select count(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
-sql select diff(tbcol) from stb where ts <= 1601481840000
-sql select first(tbcol), last(tbcol) as c from stb group by tgcol
-sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m)
-sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m)
-#sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol
+sql select * from tb1 where ts in ('2018-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000);
+sql select * from tb1 where tbcol2 in (257);
+sql select * from tb1 where tbcol3 in (2, 257);
+sql select * from stb where ts in ('2018-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000);
+sql select * from stb where tbcol2 in (257);
+sql select * from stb where tbcol3 in (2, 257);
 
 _OVER:
 system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/valgrind/checkError5.sim b/tests/script/tsim/valgrind/checkError5.sim
index f0786587d9597c14971f9d49ce4144014aef6d81..bf3f9a1b0b0bcca98644323cfd796c9c4d50ae15 100644
--- a/tests/script/tsim/valgrind/checkError5.sim
+++ b/tests/script/tsim/valgrind/checkError5.sim
@@ -100,11 +100,13 @@ sql select * from db.stb
 sql select * from db.stb
 sql_error create table db.ctb2 using db.stb tags(101, "102")
 sql create table db.ctb2 using db.stb tags(101, 102, "103", 104)
-sql insert into db.ctb2 values(now, 1, 2, 3)
+sql insert into db.ctb2 values(now, 1, 2, 3)
 
 print =============== step6: query data
 sql select * from db.stb where tbname = 'ctb2';
-
+sql alter table ctb2 set tag t1=1;
+sql alter table ctb2 set tag t3='3';
+sql select * from db.stb where t1 = 1;
 
 print =============== step7: normal table
 sql create database d1 replica 1 duration 7 keep 50
diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim
index ec8ca0ad8c6b615b6cd6748743f51aad603f7073..7a16f5668a442583cf2705a87a28776e78517c6c 100644
--- a/tests/script/tsim/valgrind/checkError6.sim
+++ b/tests/script/tsim/valgrind/checkError6.sim
@@ -104,9 +104,31 @@ sql select length("abcd1234"), char_length("abcd1234=-+*") from stb
 sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from stb
 sql select * from stb where tbcol not in (1,2,3,null);
 sql select * from stb where tbcol + 3 <> null;
+sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb where tbcol = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol interval(1d)
 
 print =============== step5: explain
-
+sql explain analyze select ts from stb where -2;
+sql explain analyze select ts from tb1;
+sql explain analyze select ts from stb order by ts;
+sql explain analyze select * from information_schema.user_stables;
+sql explain analyze select count(*),sum(tbcol) from tb1;
+sql explain analyze select count(*),sum(tbcol) from stb;
+sql explain analyze select count(*),sum(tbcol) from stb group by tbcol;
+sql explain analyze select * from information_schema.user_stables;
+sql explain analyze verbose true select * from information_schema.user_stables where db_name='db2';
+sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+
+print =============== step6: in cast
+sql select 1+1n;
+sql select cast(1 as timestamp)+1n;
+sql select cast(1 as timestamp)+1y;
+sql select * from tb1 where ts in ('2018-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000);
+sql select * from tb1 where tbcol2 in (257);
+sql select * from tb1 where tbcol3 in (2, 257);
+sql select * from stb where ts in ('2018-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000);
+sql select * from stb where tbcol2 in (257);
+sql select * from stb where tbcol3 in (2, 257);
 
 print =============== check
 $null=
diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/valgrind/checkUdf.sim
similarity index 91%
rename from tests/script/tsim/query/udf.sim
rename to tests/script/tsim/valgrind/checkUdf.sim
index 5d69887c864621187d204f66511e7043876a1fc7..1026bad3d023cd86fce66519d37f736c532fb2b6 100644
--- a/tests/script/tsim/query/udf.sim
+++ b/tests/script/tsim/valgrind/checkUdf.sim
@@ -1,8 +1,6 @@
 system sh/stop_dnodes.sh
 system sh/deploy.sh -n dnode1 -i 1
 system sh/cfg.sh -n dnode1 -c udf -v 1
-
-print ========= start dnode1 as leader
 system sh/exec.sh -n dnode1 -s start
 sql connect
 
@@ -149,4 +147,18 @@ if $rows != 0 then
   return -1
 endi
 
-system sh/exec.sh -n dnode1 -s stop -x SIGTERM
+_OVER:
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+print =============== check
+$null=
+
+system_content sh/checkValgrind.sh -n dnode1
+print cmd return result ----> [ $system_content ]
+if $system_content > 0 then
+  return -1
+endi
+
+if $system_content == $null then
+  return -1
+endi
+
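A note on the new `step6: in cast` block in checkError6.sim above: in TDengine duration literals the `n` and `y` suffixes conventionally denote calendar months and years (unlike the fixed-width `s`, `m`, `h`, `d`), so `1+1n` and `cast(1 as timestamp)+1y` exercise the calendar-aware timestamp arithmetic path. A minimal sketch in the same sim dialect, assuming a hypothetical table `demo` with a `ts` column:

# sketch only: calendar-unit offsets on timestamps
# sql select cast(0 as timestamp)+1n;
# sql select * from demo where ts > now - 1y;

diff --git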
a/tests/system-test/0-others/cachemodel.py b/tests/system-test/0-others/cachemodel.py index 102a34612d2473e7bd3f73432d045c56a95baeac..7fc20039833001e48a51d789f2bd994b0f11d1a4 100644 --- a/tests/system-test/0-others/cachemodel.py +++ b/tests/system-test/0-others/cachemodel.py @@ -11,7 +11,7 @@ from util.dnodes import * class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , + updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143} def init(self, conn, logSql): @@ -37,7 +37,7 @@ class TDTestCase: def illegal_params(self): illegal_params = ["1","0","NULL","False","True" ,"keep","now" ,"*" , "," ,"_" , "abc" ,"keep"] - + for value in illegal_params: tdSql.error("create database testdb replica 1 cachemodel '%s' " %value) @@ -80,9 +80,9 @@ class TDTestCase: tdSql.execute(" insert into tb2 values(now , %d, %f)" %(k,k*10) ) def check_cachemodel_sets(self): - - - # check cache_last value for database + + + # check cache_last value for database tdSql.query(" show databases ") databases_infos = tdSql.queryResult @@ -96,10 +96,10 @@ class TDTestCase: continue cache_lasts[dbname]=self.getCacheModelNum(cache_last_value) - - # cache_last_set value + + # cache_last_set value for k , v in cache_lasts.items(): - + if k=="testdb_"+str(self.getCacheModelStr(v)): tdLog.info(" database %s cache_last value check pass, value is %s "%(k,self.getCacheModelStr(v)) ) else: @@ -116,7 +116,7 @@ class TDTestCase: dataPath = buildPath + "/../sim/dnode1/data" abs_vnodePath = os.path.abspath(dataPath)+"/vnode/" tdLog.info("abs_vnodePath: %s" % abs_vnodePath) - + tdSql.query(" show dnodes ") dnode_id = tdSql.queryResult[0][0] @@ -127,7 +127,7 @@ class TDTestCase: vgroups_infos = tdSql.queryResult for vgroup_info in vgroups_infos: vnode_json = abs_vnodePath + "/vnode" +f"{vgroup_info[0]}/" + "vnode.json" - vnode_info_of_db = f"cat {vnode_json}" + vnode_info_of_db = f"cat {vnode_json}" vnode_info = subprocess.check_output(vnode_info_of_db, shell=True).decode("utf-8") infoDict = json.loads(vnode_info) vnode_json_of_dbname = f"{dnode_id}."+ dbname @@ -142,7 +142,7 @@ class TDTestCase: tdLog.exit("cacheLast not found in vnode.json of vnode%d "%(vgroup_info[0])) def restart_check_cachemodel_sets(self): - + for i in range(3): tdSql.query("show dnodes") index = tdSql.getData(0, 0) @@ -157,7 +157,7 @@ class TDTestCase: self.prepare_datas() self.check_cachemodel_sets() self.restart_check_cachemodel_sets() - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/0-others/sysinfo.py b/tests/system-test/0-others/sysinfo.py index f6f177d995dd32cc185f67cbd723f7eaff72874d..a4716dd5444efa6d6644b03c0e5e2ab10aa1a0b1 100644 --- a/tests/system-test/0-others/sysinfo.py +++ b/tests/system-test/0-others/sysinfo.py @@ -33,14 +33,14 @@ class TDTestCase: tdSql.query('select database()') tdSql.checkData(0,0,self.dbname) tdSql.execute(f'drop database {self.dbname}') - + def check_version(self): taos_list = ['server','client'] for i in taos_list: tdSql.query(f'select {i}_version()') version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char version"', 
shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1] tdSql.checkData(0,0,version_info) - + def get_server_status(self): sleep(self.delaytime) tdSql.query('select server_status()') @@ -51,7 +51,7 @@ class TDTestCase: if platform.system().lower() == 'windows': sleep(10) tdSql.error('select server_status()') - + def run(self): self.get_database_info() self.check_version() @@ -61,4 +61,4 @@ class TDTestCase: tdLog.success("%s successfully executed" % __file__) tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/taosShell.py b/tests/system-test/0-others/taosShell.py index f55813ac834edc4f26255ae02ff1ff5c7505bd5a..4f24a3bf4aacc4f2c6322e8b8ac06ebf70fadd8b 100644 --- a/tests/system-test/0-others/taosShell.py +++ b/tests/system-test/0-others/taosShell.py @@ -18,7 +18,7 @@ from util.dnodes import * def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''): if len(key) == 0: tdLog.exit("taos test key is null!") - + if platform.system().lower() == 'windows': taosCmd = buildPath + '\\build\\bin\\taos.exe ' taosCmd = taosCmd.replace('\\','\\\\') @@ -214,7 +214,7 @@ class TDTestCase: retCode, retVal = taos_command(buildPath, "p", keyDict['p'], "taos>", keyDict['c'], '', "A", '') if retCode != "TAOS_OK": tdLog.exit("taos -A fail") - + sqlString = 'create database ' + newDbName + ';' retCode = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', retVal) if retCode != "TAOS_OK": @@ -237,7 +237,7 @@ class TDTestCase: tdLog.exit("taos -s fail") print ("========== check new db ==========") - tdSql.query("show databases") + tdSql.query("show databases") for i in range(tdSql.queryRows): if tdSql.getData(i, 0) == newDbName: break @@ -259,24 +259,24 @@ class TDTestCase: if retCode != "TAOS_OK": tdLog.exit("taos -s insert data fail") - sqlString = "select * from " + newDbName + ".ctb0" + sqlString = "select * from " + newDbName + ".ctb0" tdSql.query(sqlString) tdSql.checkData(0, 0, '2021-04-01 08:00:00.000') tdSql.checkData(0, 1, 10) tdSql.checkData(1, 0, '2021-04-01 08:00:01.000') tdSql.checkData(1, 1, 20) - sqlString = "select * from " + newDbName + ".ctb1" + sqlString = "select * from " + newDbName + ".ctb1" tdSql.query(sqlString) tdSql.checkData(0, 0, '2021-04-01 08:00:00.000') tdSql.checkData(0, 1, 11) tdSql.checkData(1, 0, '2021-04-01 08:00:01.000') tdSql.checkData(1, 1, 21) - + keyDict['s'] = "\"select * from " + newDbName + ".ctb0\"" retCode = taos_command(buildPath, "s", keyDict['s'], "2021-04-01 08:00:01.000", keyDict['c'], '', '', '') if retCode != "TAOS_OK": tdLog.exit("taos -r show fail") - + tdLog.printNoPrefix("================================ parameter: -r") keyDict['s'] = "\"select * from " + newDbName + ".ctb0\"" retCode = taos_command(buildPath, "s", keyDict['s'], "1617235200000", keyDict['c'], '', 'r', '') @@ -287,9 +287,9 @@ class TDTestCase: retCode = taos_command(buildPath, "s", keyDict['s'], "1617235201000", keyDict['c'], '', 'r', '') if retCode != "TAOS_OK": tdLog.exit("taos -r show fail") - + tdSql.query('drop database %s'%newDbName) - + tdLog.printNoPrefix("================================ parameter: -f") pwd=os.getcwd() newDbName="dbf" @@ -298,15 +298,15 @@ class TDTestCase: sql2 = "echo use " + newDbName + " >> " + sqlFile if platform.system().lower() == 'windows': sql3 = "echo create table ntbf (ts timestamp, c binary(40)) >> " + sqlFile - sql4 = "echo insert 
into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile + sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile else: sql3 = "echo 'create table ntbf (ts timestamp, c binary(40))' >> " + sqlFile - sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile - sql5 = "echo show databases >> " + sqlFile - os.system(sql1) - os.system(sql2) - os.system(sql3) - os.system(sql4) + sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile + sql5 = "echo show databases >> " + sqlFile + os.system(sql1) + os.system(sql2) + os.system(sql3) + os.system(sql4) os.system(sql5) keyDict['f'] = pwd + "/0-others/sql.txt" @@ -316,7 +316,7 @@ class TDTestCase: tdLog.exit("taos -f fail") print ("========== check new db ==========") - tdSql.query("show databases") + tdSql.query("show databases") for i in range(tdSql.queryRows): #print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0))) if tdSql.getData(i, 0) == newDbName: @@ -324,13 +324,13 @@ class TDTestCase: else: tdLog.exit("create db fail after taos -f fail") - sqlString = "select * from " + newDbName + ".ntbf" + sqlString = "select * from " + newDbName + ".ntbf" tdSql.query(sqlString) tdSql.checkData(0, 0, '2021-04-01 08:00:00.000') tdSql.checkData(0, 1, 'test taos -f1') tdSql.checkData(1, 0, '2021-04-01 08:00:01.000') tdSql.checkData(1, 1, 'test taos -f2') - + shellCmd = "rm -f " + sqlFile os.system(shellCmd) tdSql.query('drop database %s'%newDbName) @@ -345,9 +345,9 @@ class TDTestCase: #print ("-C return content:\n ", retVal) totalCfgItem = {"firstEp":['', '', ''], } for line in retVal.splitlines(): - strList = line.split() + strList = line.split() if (len(strList) > 2): - totalCfgItem[strList[1]] = strList + totalCfgItem[strList[1]] = strList #print ("dict content:\n ", totalCfgItem) firstEp = keyDict["h"] + ':' + keyDict['P'] @@ -356,8 +356,8 @@ class TDTestCase: if (totalCfgItem["rpcDebugFlag"][2] != self.rpcDebugFlagVal) and (totalCfgItem["rpcDebugFlag"][0] != 'cfg_file'): tdLog.exit("taos -C return rpcDebugFlag error!") - - count = os.cpu_count() + + count = os.cpu_count() if (totalCfgItem["numOfCores"][2] != count) and (totalCfgItem["numOfCores"][0] != 'default'): tdLog.exit("taos -C return numOfCores error!") diff --git a/tests/system-test/0-others/taosShellError.py b/tests/system-test/0-others/taosShellError.py index 5735e55c03ec8e0d301a2099dcdaa51fc57b5e40..8666c2e54d9cb4af659479e7de850272050c0876 100644 --- a/tests/system-test/0-others/taosShellError.py +++ b/tests/system-test/0-others/taosShellError.py @@ -18,7 +18,7 @@ from util.dnodes import * def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''): if len(key) == 0: tdLog.exit("taos test key is null!") - + if platform.system().lower() == 'windows': taosCmd = buildPath + '\\build\\bin\\taos.exe ' taosCmd = taosCmd.replace('\\','\\\\') @@ -231,7 +231,7 @@ class TDTestCase: tdLog.info("taos -P %s test success"%keyDict['P']) else: tdLog.exit("taos -P %s fail"%keyDict['P']) - + tdLog.printNoPrefix("================================ parameter: -f with error sql ") pwd=os.getcwd() newDbName="dbf" @@ -240,15 +240,15 @@ class TDTestCase: sql2 = "echo use " + newDbName + " >> " + sqlFile if platform.system().lower() 
== 'windows': sql3 = "echo create table ntbf (ts timestamp, c binary(40)) no this item >> " + sqlFile - sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile + sql4 = "echo insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\") >> " + sqlFile else: sql3 = "echo 'create table ntbf (ts timestamp, c binary(40)) no this item' >> " + sqlFile - sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile - sql5 = "echo show databases >> " + sqlFile - os.system(sql1) - os.system(sql2) - os.system(sql3) - os.system(sql4) + sql4 = "echo 'insert into ntbf values (\"2021-04-01 08:00:00.000\", \"test taos -f1\")(\"2021-04-01 08:00:01.000\", \"test taos -f2\")' >> " + sqlFile + sql5 = "echo show databases >> " + sqlFile + os.system(sql1) + os.system(sql2) + os.system(sql3) + os.system(sql4) os.system(sql5) keyDict['f'] = pwd + "/0-others/sql.txt" @@ -258,7 +258,7 @@ class TDTestCase: tdLog.exit("taos -f fail") print ("========== check new db ==========") - tdSql.query("show databases") + tdSql.query("show databases") for i in range(tdSql.queryRows): #print ("dbseq: %d, dbname: %s"%(i, tdSql.getData(i, 0))) if tdSql.getData(i, 0) == newDbName: @@ -266,9 +266,9 @@ class TDTestCase: else: tdLog.exit("create db fail after taos -f fail") - sqlString = "select * from " + newDbName + ".ntbf" + sqlString = "select * from " + newDbName + ".ntbf" tdSql.error(sqlString) - + shellCmd = "rm -f " + sqlFile os.system(shellCmd) @@ -281,16 +281,16 @@ class TDTestCase: tdSql.query('drop database %s'%newDbName) tdLog.printNoPrefix("================================ parameter: -a with error value") - #newDbName="dba" - errorPassword = 'errorPassword' + #newDbName="dba" + errorPassword = 'errorPassword' sqlString = 'create database ' + newDbName + ';' retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'a', errorPassword) if retCode != "TAOS_FAIL": tdLog.exit("taos -u %s -a %s"%(keyDict['u'], errorPassword)) tdLog.printNoPrefix("================================ parameter: -p with error value") - #newDbName="dba" - keyDict['p'] = 'errorPassword' + #newDbName="dba" + keyDict['p'] = 'errorPassword' retCode, retVal = taos_command(buildPath, "u", keyDict['u'], "taos>", keyDict['c'], sqlString, 'p', keyDict['p']) if retCode == "TAOS_FAIL" and "Authentication failure" in retVal: tdLog.info("taos -p %s test success"%keyDict['p']) diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py index 22c9c8c0c5f58a5c4c705302a3c534295ba02c5f..dd44852d4902e7d444e9aebc52fa3b4bb186fc08 100644 --- a/tests/system-test/0-others/taosShellNetChk.py +++ b/tests/system-test/0-others/taosShellNetChk.py @@ -18,7 +18,7 @@ from util.dnodes import * def taos_command (buildPath, key, value, expectString, cfgDir, sqlString='', key1='', value1=''): if len(key) == 0: tdLog.exit("taos test key is null!") - + if platform.system().lower() == 'windows': taosCmd = buildPath + '\\build\\bin\\taos.exe ' taosCmd = taosCmd.replace('\\','\\\\') @@ -158,34 +158,34 @@ class TDTestCase: if "2: service ok" in retVal: tdLog.info("taos -k success") else: - tdLog.info(retVal) + tdLog.info(retVal) tdLog.exit("taos -k fail 1") # stop taosd tdDnodes.stop(1) #sleep(10) #tdDnodes.start(1) - #sleep(5) + #sleep(5) retCode, retVal = 
taos_command(buildPath, "k", '', "", keyDict['c'], sqlString) if "0: unavailable" in retVal: tdLog.info("taos -k success") else: - tdLog.info(retVal) + tdLog.info(retVal) tdLog.exit("taos -k fail 2") # restart taosd tdDnodes.start(1) - #sleep(5) + #sleep(5) retCode, retVal = taos_command(buildPath, "k", '', "", keyDict['c'], sqlString) if "2: service ok" in retVal: tdLog.info("taos -k success") else: - tdLog.info(retVal) + tdLog.info(retVal) tdLog.exit("taos -k fail 3") tdLog.printNoPrefix("================================ parameter: -n") # stop taosd - tdDnodes.stop(1) + tdDnodes.stop(1) try: role = 'server' @@ -220,7 +220,7 @@ class TDTestCase: #print(child.after.decode()) if i == 0: tdLog.exit('taos -n server fail!') - + expectString1 = 'response is received, size:' + pktLen expectSTring2 = pktNum + '/' + pktNum if expectString1 in retResult and expectSTring2 in retResult: diff --git a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py index 4c5a434f0cc9e352934605dd2d1fd865681b7f3a..4466c4a854e481e2067b030129057ce8cb0a3211 100644 --- a/tests/system-test/0-others/taosdMonitor.py +++ b/tests/system-test/0-others/taosdMonitor.py @@ -51,7 +51,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): if "version" not in infoDict["cluster_info"] or infoDict["cluster_info"]["version"] == None: tdLog.exit("first_ep_dnode_id is null!") - + if "master_uptime" not in infoDict["cluster_info"] or infoDict["cluster_info"]["master_uptime"] == None: tdLog.exit("master_uptime is null!") @@ -69,13 +69,13 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): if "dnodes" not in infoDict["cluster_info"] or infoDict["cluster_info"]["dnodes"] == None : tdLog.exit("dnodes is null!") - + dnodes_info = { "dnode_id": 1,"dnode_ep": self.hostPort,"status":"ready"} - + for k ,v in dnodes_info.items(): if k not in infoDict["cluster_info"]["dnodes"][0] or v != infoDict["cluster_info"]["dnodes"][0][k] : tdLog.exit("dnodes info is null!") - + mnodes_info = { "mnode_id":1, "mnode_ep": self.hostPort,"role": "leader" } for k ,v in mnodes_info.items(): @@ -86,7 +86,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): if "vgroup_infos" not in infoDict or infoDict["vgroup_infos"]== None: tdLog.exit("vgroup_infos is null!") - + vgroup_infos_nums = len(infoDict["vgroup_infos"]) for index in range(vgroup_infos_nums): @@ -116,14 +116,14 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): if "timeseries_total" not in infoDict["grant_info"] or not infoDict["grant_info"]["timeseries_total"] > 0: tdLog.exit("timeseries_total is null!") - + # dnode_info ==================================== if "dnode_info" not in infoDict or infoDict["dnode_info"]== None: tdLog.exit("dnode_info is null!") - dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine', - 'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select', + dnode_infos = ['uptime', 'cpu_engine', 'cpu_system', 'cpu_cores', 'mem_engine', 'mem_system', 'mem_total', 'disk_engine', + 'disk_used', 'disk_total', 'net_in', 'net_out', 'io_read', 'io_write', 'io_read_disk', 'io_write_disk', 'req_select', 'req_select_rate', 'req_insert', 'req_insert_success', 'req_insert_rate', 'req_insert_batch', 'req_insert_batch_success', 'req_insert_batch_rate', 'errors', 'vnodes_num', 'masters', 'has_mnode', 'has_qnode', 'has_snode', 'has_bnode'] for elem in dnode_infos: @@ -134,7 +134,7 @@ class 
RequestHandlerImpl(http.server.BaseHTTPRequestHandler): if "disk_infos" not in infoDict or infoDict["disk_infos"]== None: tdLog.exit("disk_infos is null!") - + # bug for data_dir if "datadir" not in infoDict["disk_infos"] or len(infoDict["disk_infos"]["datadir"]) <=0 : tdLog.exit("datadir is null!") @@ -187,7 +187,7 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): # log_infos ==================================== - + if "log_infos" not in infoDict or infoDict["log_infos"]== None: tdLog.exit("log_infos is null!") @@ -206,13 +206,13 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): if "summary" not in infoDict["log_infos"] or len(infoDict["log_infos"]["summary"])!= 4: tdLog.exit("summary is null!") - + if "total" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["total"] < 0 : tdLog.exit("total is null!") if "level" not in infoDict["log_infos"]["summary"][0] or infoDict["log_infos"]["summary"][0]["level"] not in ["error" ,"info" , "debug" ,"trace"]: tdLog.exit("level is null!") - + def do_GET(self): """ process GET request @@ -227,25 +227,25 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): if contentEncoding == 'gzip': req_body = self.rfile.read(int(self.headers["Content-Length"])) plainText = gzip.decompress(req_body).decode() - else: + else: plainText = self.rfile.read(int(self.headers["Content-Length"])).decode() - + print(plainText) # 1. send response code and header - self.send_response(200) + self.send_response(200) self.send_header("Content-Type", "text/html; charset=utf-8") self.end_headers() - + # 2. send response content #self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8")) - + # 3. check request body info infoDict = json.loads(plainText) #print("================") # print(infoDict) self.telemetryInfoCheck(infoDict) - # 4. shutdown the server and exit case + # 4. shutdown the server and exit case assassin = threading.Thread(target=self.server.shutdown) assassin.daemon = True assassin.start() @@ -287,7 +287,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py index 4483e113bf89b8186da2478aedae0870b1613c4f..812b8b40c54a30fc478c18eb220639d9ef8117a3 100644 --- a/tests/system-test/0-others/telemetry.py +++ b/tests/system-test/0-others/telemetry.py @@ -100,9 +100,9 @@ def telemetryInfoCheck(infoDict=''): if "compStorage" not in infoDict or infoDict["compStorage"] < 0: tdLog.exit("compStorage is null!") - -class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): + +class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): def do_GET(self): """ process GET request @@ -117,26 +117,26 @@ class RequestHandlerImpl(http.server.BaseHTTPRequestHandler): if contentEncoding == 'gzip': req_body = self.rfile.read(int(self.headers["Content-Length"])) plainText = gzip.decompress(req_body).decode() - else: + else: plainText = self.rfile.read(int(self.headers["Content-Length"])).decode() print("monitor info:\n%s"%plainText) # 1. send response code and header - self.send_response(200) + self.send_response(200) self.send_header("Content-Type", "text/html; charset=utf-8") self.end_headers() - + # 2. send response content #self.wfile.write(("Hello World: " + req_body + "\n").encode("utf-8")) - + # 3. 
check request body info infoDict = json.loads(plainText) #print("================") #print(infoDict) telemetryInfoCheck(infoDict) - # 4. shutdown the server and exit case + # 4. shutdown the server and exit case assassin = threading.Thread(target=self.server.shutdown) assassin.daemon = True assassin.start() @@ -176,7 +176,7 @@ class TDTestCase: def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - + def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring tdSql.prepare() # time.sleep(2) diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py index ddbbd9b2de88e401f64531b451ff0720b2107615..c4c40348b8b5bd5e75b62ba1c914f2e7f0e29c8b 100644 --- a/tests/system-test/0-others/udfTest.py +++ b/tests/system-test/0-others/udfTest.py @@ -512,7 +512,7 @@ class TDTestCase: "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" , "select num1,num2,num3,udf1(num1,num2,num3) from tb" , "select c1,c6,udf1(c1,c6) from stb1 order by ts" , - "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;" + "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;" ] udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , "select udf2(c1) from stb1 group by 1-udf1(c1)" , diff --git a/tests/system-test/0-others/udf_cfg1.py b/tests/system-test/0-others/udf_cfg1.py index fc9265617dcd0af3436b83afcbf0e3d023507636..e6ab57b4882e5d66de0560d29908b54bb93e5357 100644 --- a/tests/system-test/0-others/udf_cfg1.py +++ b/tests/system-test/0-others/udf_cfg1.py @@ -190,7 +190,7 @@ class TDTestCase: tdSql.execute("use db ") tdSql.error("select num1 , udf1(num1) ,num2 ,udf1(num2),num3 ,udf1(num3),num4 ,udf1(num4) from tb") tdSql.error("select c1 , udf1(c1) ,c2 ,udf1(c2), c3 ,udf1(c3), c4 ,udf1(c4) from stb1 order by c1") - + # aggregate functions tdSql.error("select udf2(num1) ,udf2(num2), udf2(num3) from tb") diff --git a/tests/system-test/0-others/udf_cfg2.py b/tests/system-test/0-others/udf_cfg2.py index 07f83b145529ac700c1b98650ed55ccbc7a42d6a..3f8ba37491edd4c15e41c7903d2fb942009b36c2 100644 --- a/tests/system-test/0-others/udf_cfg2.py +++ b/tests/system-test/0-others/udf_cfg2.py @@ -514,7 +514,7 @@ class TDTestCase: "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" , "select num1,num2,num3,udf1(num1,num2,num3) from tb" , "select c1,c6,udf1(c1,c6) from stb1 order by ts" , - "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;" + "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;" ] udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , "select udf2(c1) from stb1 group by 1-udf1(c1)" , diff --git a/tests/system-test/0-others/udf_cluster.py b/tests/system-test/0-others/udf_cluster.py index 9ef3137a7e0ab28b4e1f396976d96322a10550eb..1ca17383326b8f5ccf2b3fc628d0f69aa0003ab3 100644 --- a/tests/system-test/0-others/udf_cluster.py +++ b/tests/system-test/0-others/udf_cluster.py @@ -1,7 +1,7 @@ import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -16,7 +16,7 @@ class MyDnodes(TDDnodes): super(MyDnodes,self).__init__() self.dnodes = dnodes_lists # dnode must be TDDnode instance self.simDeployed = False - + class TDTestCase: def init(self,conn ,logSql): @@ -26,7 +26,7 @@ class TDTestCase: self.master_dnode = 
self.TDDnodes.dnodes[0] conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) tdSql.init(conn1.cursor()) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -43,7 +43,7 @@ class TDTestCase: buildPath = root[:len(root) - len("/build/bin")] break return buildPath - + def prepare_udf_so(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -61,7 +61,7 @@ class TDTestCase: def prepare_data(self): - + tdSql.execute("drop database if exists db") tdSql.execute("create database if not exists db replica 1 duration 300") tdSql.execute("use db") @@ -71,7 +71,7 @@ class TDTestCase: tags (t1 int) ''' ) - + tdSql.execute( ''' create table t1 @@ -142,7 +142,7 @@ class TDTestCase: # create aggregate functions tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;") - + # functions = tdSql.getResult("show functions") # function_nums = len(functions) # if function_nums == 2: @@ -167,14 +167,14 @@ class TDTestCase: # create aggregate functions tdSql.execute("create aggregate function udf2 as '/tmp/udf/libudf2.so' outputtype double bufSize 8;") - + functions = tdSql.getResult("show functions") function_nums = len(functions) if function_nums == 2: tdLog.info("create two udf functions success ") - + def basic_udf_query(self , dnode): - + mytdSql = self.getConnection(dnode) # scalar functions @@ -229,7 +229,7 @@ class TDTestCase: else: tdLog.info(" UDF query check failed at :dnode_index %s" %dnode.index) tdLog.exit("query check failed at :dnode_index %s" %dnode.index ) - + def check_UDF_query(self): @@ -238,10 +238,10 @@ class TDTestCase: self.basic_udf_query(dnode) - def depoly_cluster(self ,dnodes_nums): + def depoly_cluster(self ,dnodes_nums): testCluster = False - valgrind = 0 + valgrind = 0 hostname = socket.gethostname() dnodes = [] start_port = 6030 @@ -253,7 +253,7 @@ class TDTestCase: dnode.addExtraCfg("monitorFqdn", hostname) dnode.addExtraCfg("monitorPort", 7043) dnodes.append(dnode) - + self.TDDnodes = MyDnodes(dnodes) self.TDDnodes.init("") self.TDDnodes.setTestCluster(testCluster) @@ -261,11 +261,11 @@ class TDTestCase: self.TDDnodes.stopAll() for dnode in self.TDDnodes.dnodes: self.TDDnodes.deploy(dnode.index,{}) - + for dnode in self.TDDnodes.dnodes: self.TDDnodes.start(dnode.index) - # create cluster + # create cluster for dnode in self.TDDnodes.dnodes: print(dnode.cfgDict) @@ -275,12 +275,12 @@ class TDTestCase: cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" print(cmd) os.system(cmd) - + time.sleep(2) tdLog.info(" create cluster done! 
") - - + + def getConnection(self, dnode): host = dnode.cfgDict["fqdn"] port = dnode.cfgDict["serverPort"] @@ -288,23 +288,23 @@ class TDTestCase: return taos.connect(host=host, port=int(port), config=config_dir) def restart_udfd(self, dnode): - + buildPath = self.getBuildPath() if (buildPath == ""): tdLog.exit("taosd not found!") else: tdLog.info("taosd found in %s" % buildPath) - + cfgPath = dnode.cfgDir - + udfdPath = buildPath +'/build/bin/udfd' for i in range(5): tdLog.info(" loop restart udfd %d_th at dnode_index : %s" % (i ,dnode.index)) self.basic_udf_query(dnode) - # stop udfd cmds + # stop udfd cmds get_processID = "ps -ef | grep -w udfd | grep %s | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'"%cfgPath processID = subprocess.check_output(get_processID, shell=True).decode("utf-8") stop_udfd = " kill -9 %s" % processID @@ -327,12 +327,12 @@ class TDTestCase: # self.check_UDF_query() self.restart_udfd(self.master_dnode) # self.test_restart_udfd_All_dnodes() - - + + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/0-others/udf_create.py b/tests/system-test/0-others/udf_create.py index e53ed651f09158c402d07435ab596357cbba49e3..788202eb1b9d39447dec1076dbb3091f2440dd33 100644 --- a/tests/system-test/0-others/udf_create.py +++ b/tests/system-test/0-others/udf_create.py @@ -514,7 +514,7 @@ class TDTestCase: "select c1,c2, udf1(c1,c2) from stb1 group by c1,c2" , "select num1,num2,num3,udf1(num1,num2,num3) from tb" , "select c1,c6,udf1(c1,c6) from stb1 order by ts" , - "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;" + "select abs(udf1(c1,c6,c1,c6)) , abs(ceil(c1)) from stb1 where c1 is not null order by ts;" ] udf2_sqls = ["select udf2(sub1.c1), udf2(sub2.c2) from sub1, sub2 where sub1.ts=sub2.ts and sub1.c1 is not null" , "select udf2(c1) from stb1 group by 1-udf1(c1)" , diff --git a/tests/system-test/1-insert/create_retentions.py b/tests/system-test/1-insert/create_retentions.py index 2b611420c611e6a0050b82086ea76ef35e87a35c..96f9ea51ef9b32e403964cb57ea37fcada298d0c 100644 --- a/tests/system-test/1-insert/create_retentions.py +++ b/tests/system-test/1-insert/create_retentions.py @@ -1,11 +1,11 @@ -import datetime +from datetime import datetime +import time -from dataclasses import dataclass, field -from typing import List from util.log import * from util.sql import * from util.cases import * from util.dnodes import * +from util.common import * PRIMARY_COL = "ts" @@ -24,44 +24,24 @@ BINARY_COL = "c_binary" NCHAR_COL = "c_nchar" TS_COL = "c_ts" -NUM_COL = [INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, TINT_UN_COL, SINT_UN_COL, BINT_UN_COL, INT_UN_COL] -CHAR_COL = [BINARY_COL, NCHAR_COL, ] -BOOLEAN_COL = [BOOL_COL, ] -TS_TYPE_COL = [TS_COL, ] - INT_TAG = "t_int" -ALL_COL = [PRIMARY_COL, INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BINARY_COL, NCHAR_COL, BOOL_COL, TS_COL] TAG_COL = [INT_TAG] ## insert data args: TIME_STEP = 10000 -NOW = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) +NOW = int(datetime.timestamp(datetime.now()) * 1000) # init db/table DBNAME = "db" +DB1 = "db1" +DB2 = "db2" +DB3 = "db3" +DB4 = "db4" STBNAME = "stb1" CTBNAME = "ct1" NTBNAME = "nt1" -@dataclass -class DataSet: - ts_data : List[int] = field(default_factory=list) - int_data : List[int] 
= field(default_factory=list) - bint_data : List[int] = field(default_factory=list) - sint_data : List[int] = field(default_factory=list) - tint_data : List[int] = field(default_factory=list) - int_un_data : List[int] = field(default_factory=list) - bint_un_data: List[int] = field(default_factory=list) - sint_un_data: List[int] = field(default_factory=list) - tint_un_data: List[int] = field(default_factory=list) - float_data : List[float] = field(default_factory=list) - double_data : List[float] = field(default_factory=list) - bool_data : List[int] = field(default_factory=list) - binary_data : List[str] = field(default_factory=list) - nchar_data : List[str] = field(default_factory=list) - - class TDTestCase: def init(self, conn, logSql): @@ -83,8 +63,8 @@ class TDTestCase: @property def create_databases_sql_current(self): return [ - "create database db1 retentions 1s:1d", - "create database db2 retentions 1s:1d,2m:2d,3h:3d", + f"create database {DB1} retentions 1s:1d", + f"create database {DB2} retentions 1s:1d,2m:2d,3h:3d", ] @property @@ -95,21 +75,22 @@ class TDTestCase: ] @property - def create_stable_sql_err(self): + def create_stable_sql_err(self, dbname=DB2): return [ - f"create stable stb11 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(ceil) watermark 1s max_delay 1m", - f"create stable stb12 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(count) watermark 1min", - f"create stable stb13 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay -1s", - f"create stable stb14 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark -1m", - f"create stable stb15 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ", - f"create stable stb16 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) max_delay 1m ", - f"create stable stb21 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} binary(16)) tags (tag1 int) rollup(avg) watermark 1s", - f"create stable stb22 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) tags (tag1 int) rollup(avg) max_delay 1m", - f"create table ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s", - f"create stable stb23 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) tags (tag1 int) " , - f"create stable stb24 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) " , - f"create stable stb25 ({PRIMARY_COL} timestamp, {INT_COL} int) " , - f"create stable stb26 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) " , + f"create stable {dbname}.stb11 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(ceil) watermark 1s max_delay 1m", + f"create stable {dbname}.stb12 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(count) watermark 1min", + f"create stable {dbname}.stb13 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay -1s", + f"create stable {dbname}.stb14 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) watermark -1m", + f"create stable {dbname}.stb15 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 1m ", + f"create stable {dbname}.stb16 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) max_delay 1m ", + f"create stable {dbname}.stb21 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} binary(16)) tags (tag1 int) rollup(avg) watermark 1s", + f"create stable {dbname}.stb22 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) tags (tag1 int) rollup(avg) 
max_delay 1m", + f"create table {dbname}.ntb_1 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) rollup(avg) watermark 1s max_delay 1s", + f"create table {dbname}.ntb_2 ({PRIMARY_COL} timestamp, {INT_COL} int) " , + f"create stable {dbname}.stb23 ({PRIMARY_COL} timestamp, {INT_COL} int, {NCHAR_COL} nchar(16)) tags (tag1 int) " , + f"create stable {dbname}.stb24 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) " , + f"create stable {dbname}.stb25 ({PRIMARY_COL} timestamp, {INT_COL} int) " , + f"create stable {dbname}.stb26 ({PRIMARY_COL} timestamp, {INT_COL} int, {BINARY_COL} nchar(16)) " , # watermark, max_delay: [0, 900000], [ms, s, m, ?] f"create stable stb17 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(min) max_delay 1u", @@ -135,7 +116,7 @@ class TDTestCase: f"create stable stb7 ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) rollup(first) watermark 5s max_delay 1m sma({INT_COL})", ] - def test_create_stb(self, db="db2"): + def test_create_stb(self, db=DB2): tdSql.execute(f"use {db}") for err_sql in self.create_stable_sql_err: tdSql.error(err_sql) @@ -146,7 +127,7 @@ class TDTestCase: tdSql.checkRows(len(self.create_stable_sql_current)) tdSql.execute("use db") # because db is a noraml database, not a rollup database, should not be able to create a rollup stable - tdSql.error(f"create stable nor_db_rollup_stb ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 5s max_delay 1m") + tdSql.error(f"create stable db.nor_db_rollup_stb ({PRIMARY_COL} timestamp, {INT_COL} int) tags (tag1 int) watermark 5s max_delay 1m") def test_create_databases(self): @@ -162,94 +143,83 @@ class TDTestCase: self.test_create_databases() self.test_create_stb() - def __create_tb(self, stb=STBNAME, ctb_num=20, ntbnum=1, rsma=False): + def __create_tb(self, stb=STBNAME, ctb_num=20, ntbnum=1, rsma=False, dbname=DBNAME, rsma_type="sum"): tdLog.printNoPrefix("==========step: create table") - create_stb_sql = f'''create table {stb}( - ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, - {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, - {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp, - {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned, - {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned - ) tags ({INT_TAG} int) - ''' - for i in range(ntbnum): - - create_ntb_sql = f'''create table nt{i+1}( + if rsma: + if rsma_type.lower().strip() in ("last", "first"): + create_stb_sql = f'''create table {dbname}.{stb}( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned, + {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned, {BINARY_COL} binary(16) + ) tags ({INT_TAG} int) rollup({rsma_type}) watermark 5s,5s max_delay 5s,5s + ''' + else: + create_stb_sql = f'''create table {dbname}.{stb}( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned, + {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned + ) tags ({INT_TAG} int) rollup({rsma_type}) watermark 5s,5s max_delay 5s,5s + ''' + tdSql.execute(create_stb_sql) + else: + create_stb_sql = f'''create table {dbname}.{stb}( ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, {FLOAT_COL} float, {DOUBLE_COL} double, 
{BOOL_COL} bool, {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp, {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned, {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned - ) + ) tags ({INT_TAG} int) ''' - tdSql.execute(create_stb_sql) - tdSql.execute(create_ntb_sql) + tdSql.execute(create_stb_sql) + + for i in range(ntbnum): + create_ntb_sql = f'''create table {dbname}.nt{i+1}( + ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint, + {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool, + {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp, + {TINT_UN_COL} tinyint unsigned, {SINT_UN_COL} smallint unsigned, + {INT_UN_COL} int unsigned, {BINT_UN_COL} bigint unsigned + ) + ''' + tdSql.execute(create_ntb_sql) for i in range(ctb_num): - tdSql.execute(f'create table ct{i+1} using {stb} tags ( {i+1} )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )') - def __data_set(self, rows): - data_set = DataSet() + def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, rsma=False, rsma_type="sum"): + tdLog.printNoPrefix("==========step: start inser data into tables now.....") + # from ...pytest.util.common import DataSet + data = DataSet() + data.get_order_set(rows) for i in range(rows): - data_set.ts_data.append(NOW + 1 * (rows - i)) - data_set.int_data.append(rows - i) - data_set.bint_data.append(11111 * (rows - i)) - data_set.sint_data.append(111 * (rows - i) % 32767) - data_set.tint_data.append(11 * (rows - i) % 127) - data_set.int_un_data.append(rows - i) - data_set.bint_un_data.append(11111 * (rows - i)) - data_set.sint_un_data.append(111 * (rows - i) % 32767) - data_set.tint_un_data.append(11 * (rows - i) % 127) - data_set.float_data.append(1.11 * (rows - i)) - data_set.double_data.append(1100.0011 * (rows - i)) - data_set.bool_data.append((rows - i) % 2) - data_set.binary_data.append(f'binary{(rows - i)}') - data_set.nchar_data.append(f'nchar_测试_{(rows - i)}') - - return data_set - - def __insert_data(self): - tdLog.printNoPrefix("==========step: start inser data into tables now.....") - data = self.__data_set(rows=self.rows) - - # now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000) - null_data = '''null, null, null, null, null, null, null, null, null, null, null, null, null, null''' - zero_data = "0, 0, 0, 0, 0, 0, 0, 'binary_0', 'nchar_0', 0, 0, 0, 0, 0" - - for i in range(self.rows): - row_data = f''' - {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]}, - {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.tint_un_data[i]}, - {data.sint_un_data[i]}, {data.int_un_data[i]}, {data.bint_un_data[i]} - ''' - neg_row_data = f''' - {-1 * data.int_data[i]}, {-1 * data.bint_data[i]}, {-1 * data.sint_data[i]}, {-1 * data.tint_data[i]}, {-1 * data.float_data[i]}, {-1 * data.double_data[i]}, - {data.bool_data[i]}, '{data.binary_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {1 * data.tint_un_data[i]}, - {1 * data.sint_un_data[i]}, {1 * data.int_un_data[i]}, {1 * data.bint_un_data[i]} - ''' - - tdSql.execute( f"insert into ct1 values ( {NOW - i * TIME_STEP}, {row_data} )" ) - tdSql.execute( f"insert into ct2 values ( {NOW - i * int(TIME_STEP * 0.6)}, {neg_row_data} )" ) - tdSql.execute( f"insert into ct4 values ( {NOW - i * int(TIME_STEP * 0.8) }, {row_data} )" ) - tdSql.execute( f"insert into {NTBNAME} values ( {NOW - i * 
int(TIME_STEP * 1.2)}, {row_data} )" ) - - tdSql.execute( f"insert into ct2 values ( {NOW + int(TIME_STEP * 0.6)}, {null_data} )" ) - tdSql.execute( f"insert into ct2 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.6)}, {null_data} )" ) - tdSql.execute( f"insert into ct2 values ( {NOW - self.rows * int(TIME_STEP * 0.29) }, {null_data} )" ) - - tdSql.execute( f"insert into ct4 values ( {NOW + int(TIME_STEP * 0.8)}, {null_data} )" ) - tdSql.execute( f"insert into ct4 values ( {NOW - (self.rows + 1) * int(TIME_STEP * 0.8)}, {null_data} )" ) - tdSql.execute( f"insert into ct4 values ( {NOW - self.rows * int(TIME_STEP * 0.39)}, {null_data} )" ) - - tdSql.execute( f"insert into {NTBNAME} values ( {NOW + int(TIME_STEP * 1.2)}, {null_data} )" ) - tdSql.execute( f"insert into {NTBNAME} values ( {NOW - (self.rows + 1) * int(TIME_STEP * 1.2)}, {null_data} )" ) - tdSql.execute( f"insert into {NTBNAME} values ( {NOW - self.rows * int(TIME_STEP * 0.59)}, {null_data} )" ) + if rsma: + if rsma_type.lower().strip() in ("last", "first"): + row_data = f''' + {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]}, + {data.utint_data[i]}, {data.usint_data[i]}, {data.uint_data[i]}, {data.ubint_data[i]}, '{data.vchar_data[i]}' + ''' + else: + row_data = f''' + {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]}, + {data.utint_data[i]}, {data.usint_data[i]}, {data.uint_data[i]}, {data.ubint_data[i]} + ''' + else: + row_data = f''' + {data.int_data[i]}, {data.bint_data[i]}, {data.sint_data[i]}, {data.tint_data[i]}, {data.float_data[i]}, {data.double_data[i]}, + {data.bool_data[i]}, '{data.vchar_data[i]}', '{data.nchar_data[i]}', {data.ts_data[i]}, {data.utint_data[i]}, + {data.usint_data[i]}, {data.uint_data[i]}, {data.ubint_data[i]} + ''' + tdSql.execute( f"insert into {dbname}.{NTBNAME} values ( {NOW - i * int(TIME_STEP * 1.2)}, {row_data} )" ) + + + for j in range(ctb_num): + tdSql.execute( f"insert into {dbname}.ct{j+1} values ( {NOW - i * TIME_STEP}, {row_data} )" ) def run(self): self.rows = 10 - tdSql.prepare() + tdSql.prepare(dbname=DBNAME) tdLog.printNoPrefix("==========step0:all check") self.all_test() @@ -257,26 +227,94 @@ class TDTestCase: tdLog.printNoPrefix("==========step1:create table in normal database") tdSql.prepare() self.__create_tb() - self.__insert_data() - # return + self.__insert_data(rows=self.rows) tdLog.printNoPrefix("==========step2:create table in rollup database") - tdSql.execute("create database db3 retentions 1s:4m,2s:8m,3s:12m") - - tdSql.execute("drop database if exists db1 ") - tdSql.execute("drop database if exists db2 ") - - tdSql.execute("use db3") - self.test_create_stb(db="db3") - # self.__create_tb() - # self.__insert_data() - self.all_test() + tdLog.printNoPrefix("==========step2.1 : rolluo func is not last/first") + tdSql.prepare(dbname=DB3, **{"retentions": "1s:1d, 3s:3d, 5s:5d"}) + + db3_ctb_num = 10 + self.__create_tb(rsma=True, dbname=DB3, ctb_num=db3_ctb_num, stb=STBNAME) + self.__insert_data(rows=self.rows, rsma=True, dbname=DB3, ctb_num=db3_ctb_num) + time.sleep(6) + tdSql.query(f"select count(*) from {DB3}.{STBNAME} where ts > now()-5m") + tdSql.checkRows(1) + tdSql.checkData(0, 0, self.rows * db3_ctb_num) + tdSql.execute(f"flush database {DB3}") + tdSql.query(f"select count(*) from {DB3}.{STBNAME} where ts > now()-5m") + tdSql.checkData(0, 0, self.rows * db3_ctb_num) + tdSql.checkRows(1) + tdSql.query(f"select 
{INT_COL} from {DB3}.{CTBNAME} where ts > now()-4d") + tdSql.checkData(0, 0, self.rows-1) + tdSql.query(f"select {INT_COL} from {DB3}.{CTBNAME} where ts > now()-6d") + tdSql.checkData(0, 0, self.rows-1) + + # from ...pytest.util.sql import tdSql + + tdLog.printNoPrefix("==========step2.1.1 : alter stb schemaL drop column") + tdSql.query(f"select {BINT_COL} from {DB3}.{STBNAME}") + #tdSql.execute(f"alter stable {DB3}.stb1 drop column {BINT_COL}") + # not support alter stable schema anymore + tdSql.error(f"alter stable {DB3}.stb1 drop column {BINT_COL}") + #tdSql.error(f"select {BINT_COL} from {DB3}.{STBNAME}") + + + tdLog.printNoPrefix("==========step2.1.2 : alter stb schemaL add num_column") + # not support alter stable schema anymore + tdSql.error(f"alter stable {DB3}.stb1 add column {INT_COL}_1 int") + tdSql.error(f"select {INT_COL}_1 from {DB3}.{STBNAME}") + #tdSql.execute(f"alter stable {DB3}.stb1 add column {INT_COL}_1 int") + #tdSql.query(f"select count({INT_COL}_1) from {DB3}.{STBNAME} where _c0 > now-5m") + #tdSql.checkData(0, 0, 0) + #tdSql.execute(f"insert into {DB3}.{CTBNAME} ({PRIMARY_COL}, {INT_COL}, {INT_COL}_1) values({NOW}+20s, 111, 112)") + #time.sleep(7) + #tdSql.query(f"select _rowts, {INT_COL}, {INT_COL}_1 from {DB3}.{CTBNAME} where _c0 > now()-1h and _c0>{NOW}") + #tdSql.checkRows(1) + #tdSql.checkData(0, 1, 111) + #tdSql.checkData(0, 2, 112) +# + #tdSql.query(f"select _rowts, {INT_COL}, {INT_COL}_1 from {DB3}.{CTBNAME} where _c0 > now()-2d and _c0>{NOW}") + #tdSql.checkRows(1) + #tdSql.checkData(0, 1, 111) + #tdSql.checkData(0, 2, 112) + #tdSql.query(f"select _rowts, {INT_COL}, {INT_COL}_1 from {DB3}.{CTBNAME} where _c0 > now()-7d and _c0>{NOW}") + #tdSql.checkRows(1) + #tdSql.checkData(0, 1, 111) + #tdSql.checkData(0, 2, 112) + tdLog.printNoPrefix("==========step2.1.3 : drop child-table") + tdSql.execute(f"drop table {DB3}.{CTBNAME} ") + + + tdLog.printNoPrefix("==========step2.2 : rolluo func is last/first") + tdSql.prepare(dbname=DB4, **{"retentions": "1s:1d, 2m:3d, 3m:5d"}) + + db4_ctb_num = 10 + tdSql.execute(f"use {DB4}") + self.__create_tb(rsma=True, dbname=DB4, ctb_num=db4_ctb_num, rsma_type="last") + self.__insert_data(rows=self.rows, rsma=True, dbname=DB4, ctb_num=db4_ctb_num, rsma_type="last") + time.sleep(7) + tdSql.query(f"select count(*) from {DB4}.stb1 where ts > now()-5m") + tdSql.checkRows(1) + tdSql.checkData(0, 0, self.rows * db4_ctb_num) + tdSql.execute(f"flush database {DB4}") + tdSql.query(f"select count(*) from {DB4}.stb1 where ts > now()-5m") + tdSql.checkRows(1) + tdSql.checkData(0, 0, self.rows * db4_ctb_num) + tdSql.query(f"select {INT_COL} from {DB4}.ct1 where ts > now()-4d") + tdSql.checkRows_range([1,2]) + # tdSql.checkData(0, 0, self.rows-1) + tdSql.query(f"select {INT_COL} from {DB4}.ct1 where ts > now()-6d") + tdSql.checkRows_range([1,2]) + # tdSql.checkData(0, 0, self.rows-1) + # return - tdSql.execute("drop database if exists db1 ") - tdSql.execute("drop database if exists db2 ") + tdSql.execute(f"drop database if exists {DB1} ") + tdSql.execute(f"drop database if exists {DB2} ") + # self.all_test() - tdDnodes.stop(1) - tdDnodes.start(1) + # tdDnodes.stop(1) + # tdDnodes.start(1) + tdSql.execute(f"flush database {DBNAME}") tdLog.printNoPrefix("==========step4:after wal, all check again ") self.all_test() diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py index 260528be0405c86aa6b92240ce4661fef5f27729..f38a99d80910713b892131f03fcc7fecdddf590a 100644 --- a/tests/system-test/2-query/csum.py +++ 
b/tests/system-test/2-query/csum.py @@ -279,14 +279,14 @@ class TDTestCase: tdSql.error(self.csum_query_form(alias=", diff(c1)")) # mix with calculation function 2 # tdSql.error(self.csum_query_form(alias=" + 2")) # mix with arithmetic 1 tdSql.error(self.csum_query_form(alias=" + avg(c1)")) # mix with arithmetic 2 - tdSql.error(self.csum_query_form(alias=", c2")) # mix with other 1 + # tdSql.error(self.csum_query_form(alias=", c2")) # mix with other 1 # tdSql.error(self.csum_query_form(table_expr="stb1")) # select stb directly - stb_join = { - "col": "stb1.c1", - "table_expr": "stb1, stb2", - "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" - } - tdSql.error(self.csum_query_form(**stb_join)) # stb join + #stb_join = { + # "col": "stb1.c1", + # "table_expr": "stb1, stb2", + # "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts" + #} + #tdSql.error(self.csum_query_form(**stb_join)) # stb join interval_sql = { "condition": "where ts>0 and ts < now interval(1h) fill(next)" } @@ -421,6 +421,19 @@ class TDTestCase: tdSql.query("select csum(abs(c1))+2 from db.t1 ") tdSql.checkRows(4) + # support selectivity + tdSql.query("select ts, c1, csum(1) from db.t1") + tdSql.checkRows(7) + + tdSql.query("select csum(1), ts, c1 from db.t1") + tdSql.checkRows(7) + + tdSql.query("select ts, c1, c2, c3, csum(1), ts, c4, c5, c6 from db.t1") + tdSql.checkRows(7) + + tdSql.query("select ts, c1, csum(1), c4, c5, csum(1), c6 from db.t1") + tdSql.checkRows(7) + def csum_support_stable(self): tdSql.query(" select csum(1) from db.stb1 ") tdSql.checkRows(70) @@ -474,6 +487,7 @@ class TDTestCase: # tdSql.checkRows(4) + def run(self): import traceback try: diff --git a/tests/system-test/2-query/function_stateduration.py b/tests/system-test/2-query/function_stateduration.py index 3478b7fef957ad7e0d8b9e0b6867a0dd441736ff..9762b66ba72f3d7f0adffd2c48546328ecac341a 100644 --- a/tests/system-test/2-query/function_stateduration.py +++ b/tests/system-test/2-query/function_stateduration.py @@ -104,8 +104,6 @@ class TDTestCase: "select stateduration(c1 ,'GT',1,1s) , min(c1) from t1", "select stateduration(c1 ,'GT',1,1s) , spread(c1) from t1", "select stateduration(c1 ,'GT',1,1s) , diff(c1) from t1", - "select stateduration(c1 ,'GT',1,1s) , abs(c1) from t1", - "select stateduration(c1 ,'GT',1,1s) , c1 from t1", ] for error_sql in error_sql_lists: tdSql.error(error_sql) @@ -226,18 +224,24 @@ class TDTestCase: tdSql.query("select stateduration(c6,'GT',1,1s) from ct4") tdSql.checkRows(12) - tdSql.error("select stateduration(c6,'GT',1,1s),tbname from ct1") - tdSql.error("select stateduration(c6,'GT',1,1s),t1 from ct1") + tdSql.query("select stateduration(c6,'GT',1,1s),tbname from ct1") + tdSql.checkRows(13) + tdSql.query("select stateduration(c6,'GT',1,1s),t1 from ct1") + tdSql.checkRows(13) # unique with common col - tdSql.error("select stateduration(c6,'GT',1,1s) ,ts from ct1") - tdSql.error("select stateduration(c6,'GT',1,1s) ,c1 from ct1") + tdSql.query("select stateduration(c6,'GT',1,1s) ,ts from ct1") + tdSql.checkRows(13) + tdSql.query("select stateduration(c6,'GT',1,1s) ,c1 from ct1") + tdSql.checkRows(13) # unique with scalar function - tdSql.error("select stateduration(c6,'GT',1,1s) ,abs(c1) from ct1") - tdSql.error("select stateduration(c6,'GT',1,1s) , unique(c2) from ct1") - tdSql.error("select stateduration(c6,'GT',1,1s) , abs(c2)+2 from ct1") + tdSql.query("select stateduration(c6,'GT',1,1s) , abs(c1) from ct1") + tdSql.checkRows(13) + tdSql.query("select 
stateduration(c6,'GT',1,1s) , abs(c2)+2 from ct1") + tdSql.checkRows(13) + tdSql.error("select stateduration(c6,'GT',1,1s) , unique(c2) from ct1") # unique with aggregate function tdSql.error("select stateduration(c6,'GT',1,1s) ,sum(c1) from ct1") diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py index 09a046d6efe3d3502ba868820e7f7275e961896c..856006aaf1190c2bd0ac4f3ff1f52da523eae6e0 100644 --- a/tests/system-test/2-query/irate.py +++ b/tests/system-test/2-query/irate.py @@ -213,7 +213,7 @@ class TDTestCase: tdSql.error("select irate(c1), abs(c1) from ct4 ") # agg functions mix with agg functions - tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname ") + tdSql.query("select irate(c1), count(c5) from stb1 partition by tbname order by tbname") tdSql.checkData(0, 0, 0.000000000) tdSql.checkData(1, 0, 0.000000000) tdSql.checkData(0, 1, 13) diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py index 91e2aa9e47d06ba86d0e56c167437fab05abf2c5..a88c4aef9fdad7580d4d10a642093c80750b1c57 100644 --- a/tests/system-test/2-query/statecount.py +++ b/tests/system-test/2-query/statecount.py @@ -105,8 +105,6 @@ class TDTestCase: "select statecount(c1 ,'GT',1) , min(c1) from t1", "select statecount(c1 ,'GT',1) , spread(c1) from t1", "select statecount(c1 ,'GT',1) , diff(c1) from t1", - "select statecount(c1 ,'GT',1) , abs(c1) from t1", - "select statecount(c1 ,'GT',1) , c1 from t1", ] for error_sql in error_sql_lists: tdSql.error(error_sql) @@ -227,17 +225,56 @@ class TDTestCase: tdSql.query("select statecount(c6,'GT',1) from ct4") tdSql.checkRows(12) - tdSql.error("select statecount(c6,'GT',1),tbname from ct1") - tdSql.error("select statecount(c6,'GT',1),t1 from ct1") + tdSql.query("select statecount(c6,'GT',1),tbname from ct1") + tdSql.checkRows(13) + tdSql.query("select statecount(c6,'GT',1),t1 from ct1") + tdSql.checkRows(13) # unique with common col - tdSql.error("select statecount(c6,'GT',1) ,ts from ct1") - tdSql.error("select statecount(c6,'GT',1) ,c1 from ct1") + tdSql.query("select statecount(c6,'GT',1) ,ts from ct1") + tdSql.checkRows(13) + tdSql.query("select ts, statecount(c6,'GT',1) from ct1") + tdSql.checkRows(13) + tdSql.query("select statecount(c6,'GT',1) ,c1 from ct1") + tdSql.checkRows(13) + tdSql.query("select c1, statecount(c6,'GT',1) from ct1") + tdSql.checkRows(13) + tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1) from ct1") + tdSql.checkRows(13) + tdSql.query("select statecount(c6,'GT',1), ts, c1, c2, c3 from ct1") + tdSql.checkRows(13) + tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from ct1") + tdSql.checkRows(13) + + tdSql.query("select stateduration(c6,'GT',1) ,ts from ct1") + tdSql.checkRows(13) + tdSql.query("select ts, stateduration(c6,'GT',1) from ct1") + tdSql.checkRows(13) + tdSql.query("select stateduration(c6,'GT',1) ,c1 from ct1") + tdSql.checkRows(13) + tdSql.query("select c1, stateduration(c6,'GT',1) from ct1") + tdSql.checkRows(13) + tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1) from ct1") + tdSql.checkRows(13) + tdSql.query("select stateduration(c6,'GT',1), ts, c1, c2, c3 from ct1") + tdSql.checkRows(13) + tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from ct1") + tdSql.checkRows(13) # unique with scalar function - tdSql.error("select statecount(c6,'GT',1) ,abs(c1) from ct1") + tdSql.query("select statecount(c6,'GT',1) , abs(c1) from ct1") + tdSql.checkRows(13) + tdSql.query("select 
statecount(c6,'GT',1) , abs(c2)+2 from ct1") + tdSql.checkRows(13) + tdSql.error("select statecount(c6,'GT',1) , unique(c2) from ct1") - tdSql.error("select statecount(c6,'GT',1) , abs(c2)+2 from ct1") + + tdSql.query("select stateduration(c6,'GT',1) , abs(c1) from ct1") + tdSql.checkRows(13) + tdSql.query("select stateduration(c6,'GT',1) , abs(c2)+2 from ct1") + tdSql.checkRows(13) + + tdSql.error("select stateduration(c6,'GT',1) , unique(c2) from ct1") # unique with aggregate function diff --git a/tests/system-test/6-cluster/5dnode1mnode.py b/tests/system-test/6-cluster/5dnode1mnode.py index ee2d8afb81729b3796d51e9354ce05a65b259e9f..6a8f5a2efbedc32e512c4078b002690982f7aafe 100644 --- a/tests/system-test/6-cluster/5dnode1mnode.py +++ b/tests/system-test/6-cluster/5dnode1mnode.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -18,7 +18,7 @@ class MyDnodes(TDDnodes): super(MyDnodes,self).__init__() self.dnodes = dnodes_lists # dnode must be TDDnode instance self.simDeployed = False - + class TDTestCase: noConn = True def init(self,conn ,logSql): @@ -29,7 +29,7 @@ class TDTestCase: self.host=self.master_dnode.cfgDict["fqdn"] conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) tdSql.init(conn1.cursor()) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -46,12 +46,12 @@ class TDTestCase: buildPath = root[:len(root) - len("/build/bin")] break return buildPath - - def depoly_cluster(self ,dnodes_nums): + + def depoly_cluster(self ,dnodes_nums): testCluster = False - valgrind = 0 + valgrind = 0 hostname = socket.gethostname() dnodes = [] start_port = 6030 @@ -63,7 +63,7 @@ class TDTestCase: dnode.addExtraCfg("monitorFqdn", hostname) dnode.addExtraCfg("monitorPort", 7043) dnodes.append(dnode) - + self.TDDnodes = MyDnodes(dnodes) self.TDDnodes.init("") self.TDDnodes.setTestCluster(testCluster) @@ -71,11 +71,11 @@ class TDTestCase: self.TDDnodes.stopAll() for dnode in self.TDDnodes.dnodes: self.TDDnodes.deploy(dnode.index,{}) - + for dnode in self.TDDnodes.dnodes: self.TDDnodes.starttaosd(dnode.index) - # create cluster + # create cluster for dnode in self.TDDnodes.dnodes[1:]: # print(dnode.cfgDict) dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] @@ -84,7 +84,7 @@ class TDTestCase: cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\"" print(cmd) os.system(cmd) - + time.sleep(2) tdLog.info(" create cluster done! 
") @@ -94,7 +94,7 @@ class TDTestCase: tdSql.checkData(4,1,'%s:6430'%self.host) tdSql.checkData(0,4,'ready') tdSql.checkData(4,4,'ready') - tdSql.query("show mnodes;") + tdSql.query("show mnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -120,7 +120,7 @@ class TDTestCase: ) for i in range(4): tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') - + tdSql.query('show databases;') tdSql.checkData(2,5,'off') tdSql.error("alter database db strict 'off'") @@ -135,7 +135,7 @@ class TDTestCase: return taos.connect(host=host, port=int(port), config=config_dir) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.five_dnode_one_mnode() @@ -145,4 +145,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode2mnode.py b/tests/system-test/6-cluster/5dnode2mnode.py index e4df9df4ea9f9f0d06175d5b06347266474e6309..59d4f5f18f6dca5471e2b81cd51575e97ff4ab73 100644 --- a/tests/system-test/6-cluster/5dnode2mnode.py +++ b/tests/system-test/6-cluster/5dnode2mnode.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -17,8 +17,8 @@ import subprocess sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import * - +from clusterCommonCheck import * + class TDTestCase: def init(self,conn ,logSql): tdLog.debug(f"start to excute {__file__}") @@ -48,7 +48,7 @@ class TDTestCase: tdSql.checkData(4,1,'%s:6430'%self.host) tdSql.checkData(0,4,'ready') tdSql.checkData(4,4,'ready') - tdSql.query("show mnodes;") + tdSql.query("show mnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -63,7 +63,7 @@ class TDTestCase: while count < 10: time.sleep(1) tdSql.query("show mnodes;") - tdSql.checkRows(2) + tdSql.checkRows(2) if tdSql.queryResult[0][2]=='leader' : if tdSql.queryResult[1][2]=='follower': print("two mnodes is ready") @@ -73,7 +73,7 @@ class TDTestCase: print("two mnodes is not ready in 10s ") # fisrt check statut ready - + tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -106,7 +106,7 @@ class TDTestCase: clusterComCheck.checkDnodes(5) # restart all taosd tdDnodes=cluster.dnodes - + # stop follower tdLog.info("stop follower") tdDnodes[1].stoptaosd() @@ -118,7 +118,7 @@ class TDTestCase: tdDnodes[1].starttaosd() if clusterComCheck.checkMnodeStatus(2) : print("both mnodes are ready") - + # stop leader tdLog.info("stop leader") tdDnodes[0].stoptaosd() @@ -133,7 +133,7 @@ class TDTestCase: if clusterComCheck.checkMnodeStatus(2) : print("both mnodes are ready") - def run(self): + def run(self): self.five_dnode_two_mnode() @@ -142,4 +142,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py b/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py index 7b86ee00676b86978005f42e95b67fab20f3bbba..af78dfae9df2c26d5c8c406f49845bc01a9244d5 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py +++ 
b/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py @@ -3,7 +3,7 @@ from numpy import row_stack import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -13,13 +13,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -57,7 +57,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -68,7 +68,7 @@ class TDTestCase: def insertData(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -118,7 +118,7 @@ class TDTestCase: rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"] rowsall=rowsPerStb*paraDict['stbNumbers'] dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -133,7 +133,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -142,7 +142,7 @@ class TDTestCase: # create database and stable clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") tdDnodes=cluster.dnodes # dnode6=cluster.addDnode(6) @@ -166,7 +166,7 @@ class TDTestCase: newTdSql=tdCom.newTdSql() threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]))) for tr in threads: - tr.start() + tr.start() dnode6Port=int(6030+5*100) tdSql.execute("create dnode '%s:%d'"%(hostname,dnode6Port)) clusterComCheck.checkDnodes(dnodeNumbers) @@ -179,7 +179,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -191,20 +191,20 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): tdLog.info("123") else: print("456") - + self.stopThread(threads) tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + clusterComCheck.checkDnodes(dnodeNumbers) clusterComCheck.checkDbRows(dbNumbers) @@ -217,7 +217,7 @@ class TDTestCase: stableName= '%s_%d'%(paraDict['stbName'],i) tdSql.query("select * from %s"%stableName) tdSql.checkRows(rowsPerStb) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) 
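# --- editor's note (annotation, not part of the patch) ----------------------
# The stopThread/_async_raise helpers these cluster tests repeat kill a worker
# thread by injecting an exception through CPython's C API. A minimal sketch
# of that pattern, assuming CPython (PyThreadState_SetAsyncExc does not exist
# on other interpreters); the helper names here are illustrative:
import ctypes
import threading

def async_raise(tid, exctype):
    # Ask the interpreter to raise `exctype` inside the thread with id `tid`.
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
                                                     ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res != 1:
        # More than one thread state was modified: revert and fail loudly.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
        raise SystemError("PyThreadState_SetAsyncExc failed")

def stop_thread(thread: threading.Thread):
    async_raise(thread.ident, SystemExit)
# The exception is delivered only when the target thread next executes Python
# bytecode, so a thread blocked inside a C call will not stop immediately.
# ----------------------------------------------------------------------------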
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode') @@ -226,4 +226,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeDrop.py b/tests/system-test/6-cluster/5dnode3mnodeDrop.py index e81d5295f28bd0d3335ef3d2ed0c1017701c5efe..32f222dacb4b1b227dd94268121c30527234c5d8 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeDrop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeDrop.py @@ -3,7 +3,7 @@ from paramiko import HostKeys import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -19,7 +19,7 @@ class MyDnodes(TDDnodes): super(MyDnodes,self).__init__() self.dnodes = dnodes_lists # dnode must be TDDnode instance self.simDeployed = False - + class TDTestCase: def init(self,conn ,logSql): @@ -48,7 +48,7 @@ class TDTestCase: buildPath = root[:len(root) - len("/build/bin")] break return buildPath - + def insert_data(self,count): # fisrt add data : db\stable\childtable\general table for couti in count: @@ -70,10 +70,10 @@ class TDTestCase: for i in range(4): tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') - def depoly_cluster(self ,dnodes_nums): + def depoly_cluster(self ,dnodes_nums): testCluster = False - valgrind = 0 + valgrind = 0 hostname = socket.gethostname() tdLog.debug(hostname) dnodes = [] @@ -88,7 +88,7 @@ class TDTestCase: dnode.addExtraCfg("monitorPort", 7043) dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}") dnodes.append(dnode) - + self.TDDnodes = MyDnodes(dnodes) self.TDDnodes.init("") self.TDDnodes.setTestCluster(testCluster) @@ -96,11 +96,11 @@ class TDTestCase: self.TDDnodes.stopAll() for dnode in self.TDDnodes.dnodes: self.TDDnodes.deploy(dnode.index,{}) - + for dnode in self.TDDnodes.dnodes: self.TDDnodes.starttaosd(dnode.index) - # create cluster + # create cluster for dnode in self.TDDnodes.dnodes[1:]: # tdLog.debug(dnode.cfgDict) dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] @@ -109,7 +109,7 @@ class TDTestCase: cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" tdLog.debug(cmd) os.system(cmd) - + time.sleep(2) tdLog.info(" create cluster with %d dnode done! 
" %dnodes_nums) @@ -118,8 +118,8 @@ class TDTestCase: while count < 10: time.sleep(1) tdSql.query("show mnodes;") - if tdSql.checkRows(3) : - tdLog.debug("mnode is three nodes") + if tdSql.checkRows(3) : + tdLog.debug("mnode is three nodes") if tdSql.queryResult[0][2]=='leader' : if tdSql.queryResult[1][2]=='follower': if tdSql.queryResult[2][2]=='follower': @@ -129,20 +129,20 @@ class TDTestCase: if tdSql.queryResult[1][2]=='leader': if tdSql.queryResult[2][2]=='follower': tdLog.debug("three mnodes is ready in 10s") - break + break elif tdSql.queryResult[0][2]=='follower' : if tdSql.queryResult[1][2]=='follower': if tdSql.queryResult[2][2]=='leader': tdLog.debug("three mnodes is ready in 10s") - break + break count+=1 else: tdLog.debug(tdSql.queryResult) tdLog.debug("three mnodes is not ready in 10s ") return -1 - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,3,'ready') tdSql.checkData(1,1,'%s:6130'%self.host) @@ -169,11 +169,11 @@ class TDTestCase: count+=1 else: tdLog.debug("stop mnodes on dnode 2 failed in 10s ") - return -1 + return -1 tdSql.error("drop mnode on dnode 1;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'offline') tdSql.checkData(0,3,'ready') @@ -200,8 +200,8 @@ class TDTestCase: return -1 tdSql.error("drop mnode on dnode 2;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -229,8 +229,8 @@ class TDTestCase: tdLog.debug("stop mnodes on dnode 3 failed in 10s") return -1 tdSql.error("drop mnode on dnode 3;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -249,8 +249,8 @@ class TDTestCase: tdSql.checkData(4,1,'%s:6430'%self.host) tdSql.checkData(0,4,'ready') tdSql.checkData(4,4,'ready') - tdSql.query("show mnodes;") - tdSql.checkRows(1) + tdSql.query("show mnodes;") + tdSql.checkRows(1) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -270,8 +270,8 @@ class TDTestCase: tdSql.query("show dnodes;") tdLog.debug(tdSql.queryResult) - # drop follower of mnode - dropcount =0 + # drop follower of mnode + dropcount =0 while dropcount <= 10: for i in range(1,3): tdLog.debug("drop mnode on dnode %d"%(i+1)) @@ -306,7 +306,7 @@ class TDTestCase: return taos.connect(host=host, port=int(port), config=config_dir) - def run(self): + def run(self): # tdLog.debug(self.master_dnode.cfgDict) self.buildcluster(5) self.five_dnode_three_mnode() @@ -316,4 +316,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py index cfa39206048b6579238ddf55d74cd10e351e0edc..106bb26264a0981017f40d2374d4b43b398b847d 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py +++ b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos 
import sys import time -import os +import os from util.log import * from util.sql import * @@ -13,7 +13,7 @@ import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -37,7 +37,7 @@ class TDTestCase: self.host=self.master_dnode.cfgDict["fqdn"] conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) tdSql.init(conn1.cursor()) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -63,7 +63,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -74,7 +74,7 @@ class TDTestCase: def createDbTbale(self,dbcountStart,dbcountStop,stbname,chilCount): # fisrt add data : db\stable\childtable\general table - + for couti in range(dbcountStart,dbcountStop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -98,7 +98,7 @@ class TDTestCase: def insertTabaleData(self,dbcountStart,dbcountStop,stbname,chilCount,ts_start,rowCount): # insert data : create childtable and data - + for couti in range(dbcountStart,dbcountStop): tdSql.execute("use db%d" %couti) pre_insert = "insert into " @@ -115,7 +115,7 @@ class TDTestCase: # print(sql) tdSql.execute(sql) sql = "insert into %s_%d values " %(stbname,i) - # end sql + # end sql if sql != pre_insert: # print(sql) print(len(sql)) @@ -134,13 +134,13 @@ class TDTestCase: for i in range(stableCount): tdSql.query("select count(*) from %s%d"%(stbname,i)) tdSql.checkData(0,0,rowsPerSTable) - return - + return - def depoly_cluster(self ,dnodes_nums): + + def depoly_cluster(self ,dnodes_nums): testCluster = False - valgrind = 0 + valgrind = 0 hostname = socket.gethostname() dnodes = [] start_port = 6030 @@ -154,7 +154,7 @@ class TDTestCase: dnode.addExtraCfg("monitorPort", 7043) dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}") dnodes.append(dnode) - + self.TDDnodes = MyDnodes(dnodes) self.TDDnodes.init("") self.TDDnodes.setTestCluster(testCluster) @@ -162,11 +162,11 @@ class TDTestCase: self.TDDnodes.stopAll() for dnode in self.TDDnodes.dnodes: self.TDDnodes.deploy(dnode.index,{}) - + for dnode in self.TDDnodes.dnodes: self.TDDnodes.starttaosd(dnode.index) - # create cluster + # create cluster for dnode in self.TDDnodes.dnodes[1:]: # print(dnode.cfgDict) dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] @@ -175,7 +175,7 @@ class TDTestCase: cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" print(cmd) os.system(cmd) - + time.sleep(2) tdLog.info(" create cluster with %d dnode done! 
" %dnodes_nums) @@ -185,8 +185,8 @@ class TDTestCase: time.sleep(1) statusReadyBumber=0 tdSql.query("show dnodes;") - if tdSql.checkRows(dnodenumber) : - print("dnode is %d nodes"%dnodenumber) + if tdSql.checkRows(dnodenumber) : + print("dnode is %d nodes"%dnodenumber) for i in range(dnodenumber): if tdSql.queryResult[i][4] !='ready' : status=tdSql.queryResult[i][4] @@ -203,15 +203,15 @@ class TDTestCase: else: print("%d mnodes is not ready in 10s "%dnodenumber) return False - + def check3mnode(self): count=0 while count < 10: time.sleep(1) tdSql.query("show mnodes;") - if tdSql.checkRows(3) : - print("mnode is three nodes") + if tdSql.checkRows(3) : + print("mnode is three nodes") if tdSql.queryResult[0][2]=='leader' : if tdSql.queryResult[1][2]=='follower': if tdSql.queryResult[2][2]=='follower': @@ -221,19 +221,19 @@ class TDTestCase: if tdSql.queryResult[1][2]=='leader': if tdSql.queryResult[2][2]=='follower': print("three mnodes is ready in 10s") - break + break elif tdSql.queryResult[0][2]=='follower' : if tdSql.queryResult[1][2]=='follower': if tdSql.queryResult[2][2]=='leader': print("three mnodes is ready in 10s") - break + break count+=1 else: print("three mnodes is not ready in 10s ") return -1 - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,3,'ready') tdSql.checkData(1,1,'%s:6130'%self.host) @@ -263,8 +263,8 @@ class TDTestCase: return -1 tdSql.error("drop mnode on dnode 1;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'offline') tdSql.checkData(0,3,'ready') @@ -291,8 +291,8 @@ class TDTestCase: return -1 tdSql.error("drop mnode on dnode 2;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -320,8 +320,8 @@ class TDTestCase: print("stop mnodes on dnode 3 failed in 10s") return -1 tdSql.error("drop mnode on dnode 3;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -348,8 +348,8 @@ class TDTestCase: tdSql.checkData(4,1,'%s:6430'%self.host) tdSql.checkData(0,4,'ready') tdSql.checkData(4,4,'ready') - tdSql.query("show mnodes;") - tdSql.checkRows(1) + tdSql.query("show mnodes;") + tdSql.checkRows(1) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -364,7 +364,7 @@ class TDTestCase: tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") print(tdSql.queryResult) - tdLog.debug("stop all of mnode ") + tdLog.debug("stop all of mnode ") # drop follower of mnode and insert data self.createDbTbale(dbcountStart, dbcountStop,stbname,tablesPerStb) @@ -378,7 +378,7 @@ class TDTestCase: rowsPerTable)) threads.start() - dropcount =0 + dropcount =0 while dropcount <= 10: for i in range(1,3): tdLog.debug("drop mnode on dnode %d"%(i+1)) @@ -415,7 +415,7 @@ class TDTestCase: return taos.connect(host=host, port=int(port), config=config_dir) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.buildcluster(5) self.five_dnode_three_mnode(5) @@ -425,4 +425,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) 
-tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py index 48ee90fad2a755862561bcde88feff27a6b68639..65d525cfd18825f02993c33063d6eec3d23ffbcc 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py @@ -3,7 +3,7 @@ from numpy import row_stack import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -13,13 +13,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -57,7 +57,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -68,7 +68,7 @@ class TDTestCase: def insertData(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -112,7 +112,7 @@ class TDTestCase: } username="user1" passwd="123" - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) @@ -120,7 +120,7 @@ class TDTestCase: rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"] rowsall=rowsPerStb*paraDict['stbNumbers'] dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -135,7 +135,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -162,10 +162,10 @@ class TDTestCase: for i in range(tdSql.queryRows): if tdSql.queryResult[i][0] == "%s"%username : tdLog.info("create user:%s successfully"%username) - + # # create database and stable # clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) - # tdLog.info("Take turns stopping Mnodes ") + # tdLog.info("Take turns stopping Mnodes ") # tdDnodes=cluster.dnodes # stopcount =0 @@ -197,7 +197,7 @@ class TDTestCase: # tdDnodes[i].stoptaosd() # # sleep(10) # tdDnodes[i].starttaosd() - # # sleep(10) + # # sleep(10) # elif stopRole == "vnode": # for i in range(vnodeNumbers): # tdDnodes[i+mnodeNums].stoptaosd() @@ -209,7 +209,7 @@ class TDTestCase: # tdDnodes[i].stoptaosd() # # sleep(10) # tdDnodes[i].starttaosd() - # # sleep(10) + # # sleep(10) # # dnodeNumbers don't include database of schema # if clusterComCheck.checkDnodes(dnodeNumbers): @@ -220,7 +220,7 @@ class TDTestCase: # tdLog.exit("one or more of dnodes failed to start ") # # self.check3mnode() # stopcount+=1 - + # clusterComCheck.checkDnodes(dnodeNumbers) # 
clusterComCheck.checkDbRows(dbNumbers) @@ -234,7 +234,7 @@ class TDTestCase: # # tdSql.query("select * from %s"%stableName) # # tdSql.checkRows(rowsPerStb) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode') @@ -243,4 +243,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py index 8ae09dce1601965277f04e3f1da7f7273193f42b..8a0c90966bfd73aa8594d4e5b81bca3ee0c14b9d 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py @@ -3,7 +3,7 @@ from numpy import row_stack import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -13,13 +13,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -57,7 +57,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -68,7 +68,7 @@ class TDTestCase: def insertData(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -110,7 +110,7 @@ class TDTestCase: "rowsPerTbl": 100, "batchNum": 5000 } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) @@ -118,7 +118,7 @@ class TDTestCase: rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"] rowsall=rowsPerStb*paraDict['stbNumbers'] dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -133,7 +133,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -142,7 +142,7 @@ class TDTestCase: # create database and stable clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") tdDnodes=cluster.dnodes stopcount =0 @@ -174,7 +174,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -186,7 +186,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) 
tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): @@ -197,7 +197,7 @@ class TDTestCase: tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + clusterComCheck.checkDnodes(dnodeNumbers) clusterComCheck.checkDbRows(dbNumbers) @@ -211,7 +211,7 @@ class TDTestCase: # tdSql.query("select * from %s"%stableName) # tdSql.checkRows(rowsPerStb) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode') @@ -220,4 +220,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py index 87d108cdeb418f19b6f588a506195c786f366f75..5f02efc7cef5e9f7e483c032aafe91bf922c027f 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py @@ -3,7 +3,7 @@ from numpy import row_stack import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -13,13 +13,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -57,7 +57,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -68,7 +68,7 @@ class TDTestCase: def insertData(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -110,7 +110,7 @@ class TDTestCase: "rowsPerTbl": 100, "batchNum": 5000 } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) @@ -118,7 +118,7 @@ class TDTestCase: rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"] rowsall=rowsPerStb*paraDict['stbNumbers'] dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -133,7 +133,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -142,7 +142,7 @@ class TDTestCase: # create database and stable clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") 
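# --- editor's note (annotation, not part of the patch) ----------------------
# The stop/start loops in these restart tests pick their targets from
# stopRole. The same logic, condensed into a sketch (dnode objects exposing
# stoptaosd()/starttaosd() are assumed, mirroring util.dnodes):
def rolling_restart(dnodes, stop_role, mnode_nums, vnode_nums):
    if stop_role == "mnode":
        targets = dnodes[:mnode_nums]                    # dnodes hosting mnodes
    elif stop_role == "vnode":
        targets = dnodes[mnode_nums:mnode_nums + vnode_nums]
    else:                                                # "dnode": restart all
        targets = dnodes
    for d in targets:
        d.stoptaosd()
        d.starttaosd()                                   # one process at a time
# Bouncing one process at a time preserves an mnode quorum, which is what
# lets the concurrent insert threads keep running during the restarts.
# ----------------------------------------------------------------------------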
tdDnodes=cluster.dnodes stopcount =0 @@ -173,7 +173,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -185,7 +185,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): @@ -196,10 +196,10 @@ class TDTestCase: tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() - + clusterComCheck.checkDnodes(dnodeNumbers) clusterComCheck.checkDbRows(dbNumbers) # clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"]) @@ -212,7 +212,7 @@ class TDTestCase: # tdSql.query("select * from %s"%stableName) # tdSql.checkRows(rowsPerStb) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode') @@ -221,4 +221,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py index 3c0e47903055c475222061da72cc47607581ad8c..6debffdeb8ef297b19a0c21ae07199104022f754 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -12,13 +12,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -56,7 +56,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -67,7 +67,7 @@ class TDTestCase: def insertData(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -107,13 +107,13 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) allDbNumbers=(paraDict['dbNumbers']*restartNumbers) allStbNumbers=(paraDict['stbNumbers']*restartNumbers) - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -128,7 +128,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and 
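Every file in this group also carries the identical _async_raise/stopThread helper touched by the hunks above; it injects an exception into a worker thread through CPython's PyThreadState_SetAsyncExc. A self-contained sketch of the pattern (standalone, not the harness's exact helper):

    # Standalone sketch of the _async_raise pattern shared by these tests:
    # inject an exception into a running thread via CPython's C API.
    import ctypes
    import threading
    import time

    def async_raise(tid, exctype):
        if not issubclass(exctype, BaseException):
            raise TypeError("only exception types can be raised")
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(tid), ctypes.py_object(exctype))
        if res == 0:
            raise ValueError("invalid thread id")
        elif res != 1:
            # More than one thread was affected: revert with exc=NULL.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
            raise SystemError("PyThreadState_SetAsyncExc failed")

    def worker():
        while True:
            time.sleep(0.1)   # delivery happens between bytecodes

    t = threading.Thread(target=worker, daemon=True)
    t.start()
    async_raise(t.ident, SystemExit)
    t.join(timeout=2)         # thread should unwind almost immediately

Note the exception is only delivered when the target thread next executes Python bytecode, so a thread blocked inside a long C call (a slow tdSql.execute, say) will not stop immediately.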
tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -149,7 +149,7 @@ class TDTestCase: for tr in threads: tr.start() - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") while stopcount < restartNumbers: tdLog.info(" restart loop: %d"%stopcount ) if stopRole == "mnode": @@ -157,7 +157,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -169,7 +169,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): @@ -180,7 +180,7 @@ class TDTestCase: tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() tdLog.info("check dnode number:") @@ -196,7 +196,7 @@ class TDTestCase: - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='dnode') @@ -205,4 +205,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py index f685ef2f1a7308a89d2f368934d35ac9452ea2b7..919c560330f74df541ecdde642dd3cfadcb65e03 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -12,13 +12,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -56,7 +56,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -81,13 +81,13 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) allStbNumbers=(paraDict['stbNumbers']*restartNumbers) dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -102,7 +102,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -111,7 +111,7 @@ class TDTestCase: # create 
database and stable clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") tdDnodes=cluster.dnodes stopcount =0 @@ -130,7 +130,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -142,19 +142,19 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): tdLog.info("123") else: print("456") - + self.stopThread(threads) tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() clusterComCheck.checkDnodes(dnodeNumbers) @@ -169,7 +169,7 @@ class TDTestCase: - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode') @@ -178,4 +178,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py index 8f99ef0b5c0e8f9b1a38351290b9d472cc96253d..562721581dd5474a87f85a69cc0a04957b462d0e 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py @@ -3,7 +3,7 @@ from numpy import row_stack import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -13,13 +13,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -57,7 +57,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -68,7 +68,7 @@ class TDTestCase: def insertData(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -110,7 +110,7 @@ class TDTestCase: "rowsPerTbl": 10000, "batchNum": 5000 } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) @@ -118,7 +118,7 @@ class TDTestCase: rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"] rowsall=rowsPerStb*paraDict['stbNumbers'] dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -133,7 +133,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") 
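In the InsertData variants such as this one, the payoff is the final row audit: after all restarts, each super table must still report ctbNum * rowsPerTbl rows (rowsPerStb above). A minimal sketch of that check, with query_count as a hypothetical stand-in for running "select count(*)" through tdSql and reading back the scalar:

    # Minimal sketch of the post-restart row audit in the InsertData tests;
    # query_count is a hypothetical callable returning the count(*) scalar.
    def verify_stable_rows(query_count, stb_name, stb_numbers, ctb_num, rows_per_tbl):
        expected = ctb_num * rows_per_tbl          # rowsPerStb in the tests
        for i in range(stb_numbers):
            stable = f"{stb_name}_{i}"
            got = query_count(f"select count(*) from {stable}")
            assert got == expected, f"{stable}: {got} rows, expected {expected}"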
clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -142,7 +142,7 @@ class TDTestCase: # create database and stable clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") tdDnodes=cluster.dnodes stopcount =0 @@ -171,7 +171,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -183,19 +183,19 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): tdLog.info("123") else: print("456") - + self.stopThread(threads) tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() clusterComCheck.checkDnodes(dnodeNumbers) @@ -209,7 +209,7 @@ class TDTestCase: stableName= '%s_%d'%(paraDict['stbName'],i) tdSql.query("select * from %s"%stableName) tdSql.checkRows(rowsPerStb) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode') @@ -218,4 +218,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py index d39bae68f966ea7f6329060ecdaa91f4b83cb37a..e9b032c003d3728432bfd6fcf035f5c3ddcd42c6 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -12,13 +12,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -56,7 +56,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -82,13 +82,13 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) allDbNumbers=(paraDict['dbNumbers']*restartNumbers) allStbNumbers=(paraDict['stbNumbers']*restartNumbers) - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ 
-105,14 +105,14 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") print(tdSql.queryResult) clusterComCheck.checkDnodes(dnodeNumbers) - tdLog.info("create database and stable") + tdLog.info("create database and stable") tdDnodes=cluster.dnodes stopcount =0 threads=[] @@ -124,7 +124,7 @@ class TDTestCase: for tr in threads: tr.start() - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") while stopcount < restartNumbers: tdLog.info(" restart loop: %d"%stopcount ) if stopRole == "mnode": @@ -132,7 +132,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -144,7 +144,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): @@ -155,7 +155,7 @@ class TDTestCase: tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() tdLog.info("check dnode number:") @@ -170,7 +170,7 @@ class TDTestCase: # clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i)) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='mnode') @@ -179,4 +179,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py index 1fa77d3bfd3342a75ef07490abd1d8aff34c462c..99efabd8ea5c9236894db0e1f92455fc40fdb90a 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -12,13 +12,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -56,7 +56,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -82,13 +82,13 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) 
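The CreateDb/CreateStb variants in this stretch all follow the same shape: spawn restartNumbers worker threads, each with its own connection, to run DDL while the main thread bounces nodes, then join and audit what survived. A reduced sketch of that orchestration; new_conn and create_load are hypothetical stand-ins for tdCom.newTdSql and the harness's threadFunction:

    # Reduced sketch of the create-while-restarting orchestration.
    import threading

    def run_load_under_restarts(new_conn, create_load, restart_once, restart_numbers):
        threads = [threading.Thread(target=create_load, args=(new_conn(), i))
                   for i in range(restart_numbers)]
        for t in threads:
            t.start()
        for _ in range(restart_numbers):
            restart_once()     # stop/start dnodes according to stopRole
        for t in threads:
            t.join()           # audit db/stable counts only after this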
allDbNumbers=(paraDict['dbNumbers']*restartNumbers) allStbNumbers=(paraDict['stbNumbers']*restartNumbers) - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -103,14 +103,14 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") print(tdSql.queryResult) clusterComCheck.checkDnodes(dnodeNumbers) - tdLog.info("create database and stable") + tdLog.info("create database and stable") tdDnodes=cluster.dnodes stopcount =0 threads=[] @@ -122,7 +122,7 @@ class TDTestCase: for tr in threads: tr.start() - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") while stopcount < restartNumbers: tdLog.info(" restart loop: %d"%stopcount ) if stopRole == "mnode": @@ -130,7 +130,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -142,7 +142,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): @@ -153,14 +153,14 @@ class TDTestCase: tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() tdLog.info("check dnode number:") clusterComCheck.checkDnodes(dnodeNumbers) tdSql.query("show databases") tdLog.debug("we find %d databases but exepect to create %d databases "%(tdSql.queryRows-2,allDbNumbers-2)) - + # tdLog.info("check DB Rows:") # clusterComCheck.checkDbRows(allDbNumbers) # tdLog.info("check DB Status on by on") @@ -168,7 +168,7 @@ class TDTestCase: # clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i)) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode') @@ -177,4 +177,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py index 3a1042766487d5ab30aae145a97c087ea75dff62..d8c9b9e54d9af4c069ed32d50b0e9d73a04cb5f7 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -12,13 +12,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -56,7 +56,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # 
"""if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -67,7 +67,7 @@ class TDTestCase: def insertData(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -106,13 +106,13 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) allStbNumbers=(paraDict['stbNumbers']*restartNumbers) dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -127,7 +127,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -148,7 +148,7 @@ class TDTestCase: for tr in threads: tr.start() - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") while stopcount < restartNumbers: tdLog.info(" restart loop: %d"%stopcount ) @@ -157,7 +157,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -169,19 +169,19 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): tdLog.info("123") else: print("456") - + self.stopThread(threads) tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() clusterComCheck.checkDnodes(dnodeNumbers) @@ -195,7 +195,7 @@ class TDTestCase: # tdSql.checkRows(allStbNumbers) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='mnode') @@ -204,4 +204,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py index da32d1b4a8c989ccaf4a8af228e9f6b47d64b491..706c8ad9d58fe7aba044bd7cb13c8a5f19cbbe4b 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -12,13 +12,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import 
threading import time import inspect import ctypes @@ -56,7 +56,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -82,13 +82,13 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) allDbNumbers=(paraDict['dbNumbers']*restartNumbers) allStbNumbers=(paraDict['stbNumbers']*restartNumbers) - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -103,7 +103,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -124,7 +124,7 @@ class TDTestCase: for tr in threads: tr.start() - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") while stopcount < restartNumbers: tdLog.info(" restart loop: %d"%stopcount ) if stopRole == "mnode": @@ -132,7 +132,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -144,7 +144,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): @@ -155,7 +155,7 @@ class TDTestCase: tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() clusterComCheck.checkDnodes(dnodeNumbers) @@ -169,7 +169,7 @@ class TDTestCase: # clusterComCheck.checkDb(paraDict['dbNumbers'],restartNumbers,dbNameIndex = '%s%d'%(paraDict["dbName"],i)) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=10,stopRole='vnode') @@ -178,4 +178,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py index 5c9e3587c42d128c483824b23f73a3a775ce48ea..c9f7cdacaf82a70d1efdc6161f8fb8b4791f19c5 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -12,13 +12,13 @@ from util.dnodes import TDDnode from util.cluster import * sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import clusterComCheck +from clusterCommonCheck import clusterComCheck import time import socket import subprocess from multiprocessing import Process -import threading +import threading import 
time import inspect import ctypes @@ -31,7 +31,7 @@ class TDTestCase: tdSql.init(conn.cursor()) self.host = socket.gethostname() print(tdSql) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -56,7 +56,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -67,7 +67,7 @@ class TDTestCase: def insertData(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -106,13 +106,13 @@ class TDTestCase: 'ctbPrefix': 'ctb', 'ctbNum': 1, } - + dnodeNumbers=int(dnodeNumbers) mnodeNums=int(mnodeNums) vnodeNumbers = int(dnodeNumbers-mnodeNums) allStbNumbers=(paraDict['stbNumbers']*restartNumbers) dbNumbers = 1 - + print(tdSql) tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") @@ -128,7 +128,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -137,12 +137,12 @@ class TDTestCase: # create database and stable clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") tdDnodes=cluster.dnodes stopcount =0 threads=[] - + for i in range(restartNumbers): stableName= '%s%d'%(paraDict['stbName'],i) newTdSql=tdCom.newTdSql() @@ -151,7 +151,7 @@ class TDTestCase: for tr in threads: tr.start() - tdLog.info("Take turns stopping Mnodes ") + tdLog.info("Take turns stopping Mnodes ") while stopcount < restartNumbers: tdLog.info(" restart loop: %d"%stopcount ) if stopRole == "mnode": @@ -159,7 +159,7 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) elif stopRole == "vnode": for i in range(vnodeNumbers): tdDnodes[i+mnodeNums].stoptaosd() @@ -171,19 +171,19 @@ class TDTestCase: tdDnodes[i].stoptaosd() # sleep(10) tdDnodes[i].starttaosd() - # sleep(10) + # sleep(10) # dnodeNumbers don't include database of schema if clusterComCheck.checkDnodes(dnodeNumbers): tdLog.info("123") else: print("456") - + self.stopThread(threads) tdLog.exit("one or more of dnodes failed to start ") # self.check3mnode() stopcount+=1 - + for tr in threads: tr.join() clusterComCheck.checkDnodes(dnodeNumbers) @@ -197,7 +197,7 @@ class TDTestCase: tdSql.checkRows(allStbNumbers) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='vnode') @@ -206,4 +206,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py b/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py index 
aa1d7ecc290c5950fa50ab29d1a97aff43ee58ed..bc1530bb8bdaa60d31e89d53b6cd819e705d522f 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSeperate1VnodeStopInsert.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -18,7 +18,7 @@ import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -54,7 +54,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -65,7 +65,7 @@ class TDTestCase: def insert_data(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -96,7 +96,7 @@ class TDTestCase: for i in range(stableCount): tdSql.query("select count(*) from %s%d"%(stbname,i)) tdSql.checkData(0,0,rowsPerSTable) - return + return def checkdnodes(self,dnodenumber): count=0 @@ -104,8 +104,8 @@ class TDTestCase: time.sleep(1) statusReadyBumber=0 tdSql.query("show dnodes;") - if tdSql.checkRows(dnodenumber) : - print("dnode is %d nodes"%dnodenumber) + if tdSql.checkRows(dnodenumber) : + print("dnode is %d nodes"%dnodenumber) for i in range(dnodenumber): if tdSql.queryResult[i][4] !='ready' : status=tdSql.queryResult[i][4] @@ -122,15 +122,15 @@ class TDTestCase: else: print("%d mnodes is not ready in 10s "%dnodenumber) return False - + def check3mnode(self): count=0 while count < 10: time.sleep(1) tdSql.query("show mnodes;") - if tdSql.checkRows(3) : - print("mnode is three nodes") + if tdSql.checkRows(3) : + print("mnode is three nodes") if tdSql.queryResult[0][2]=='leader' : if tdSql.queryResult[1][2]=='follower': if tdSql.queryResult[2][2]=='follower': @@ -140,19 +140,19 @@ class TDTestCase: if tdSql.queryResult[1][2]=='leader': if tdSql.queryResult[2][2]=='follower': print("three mnodes is ready in 10s") - break + break elif tdSql.queryResult[0][2]=='follower' : if tdSql.queryResult[1][2]=='follower': if tdSql.queryResult[2][2]=='leader': print("three mnodes is ready in 10s") - break + break count+=1 else: print("three mnodes is not ready in 10s ") return -1 - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,3,'ready') tdSql.checkData(1,1,'%s:6130'%self.host) @@ -182,8 +182,8 @@ class TDTestCase: return -1 tdSql.error("drop mnode on dnode 1;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'offline') tdSql.checkData(0,3,'ready') @@ -210,8 +210,8 @@ class TDTestCase: return -1 tdSql.error("drop mnode on dnode 2;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -239,8 +239,8 @@ class TDTestCase: print("stop 
mnodes on dnode 3 failed in 10s") return -1 tdSql.error("drop mnode on dnode 3;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -258,15 +258,15 @@ class TDTestCase: tdSql.checkData(4,1,'%s:6430'%self.host) tdSql.checkData(0,4,'ready') tdSql.checkData(4,4,'ready') - + def five_dnode_three_mnode(self,dnodenumber): tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(4,1,'%s:6430'%self.host) tdSql.checkData(0,4,'ready') tdSql.checkData(4,4,'ready') - tdSql.query("show mnodes;") - tdSql.checkRows(1) + tdSql.query("show mnodes;") + tdSql.checkRows(1) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -281,15 +281,15 @@ class TDTestCase: tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") print(tdSql.queryResult) - tdLog.debug("stop all of mnode ") + tdLog.debug("stop all of mnode ") # seperate vnode and mnode in different dnodes. # create database and stable - stopcount =0 + stopcount =0 while stopcount < 2: for i in range(dnodenumber): # threads=[] - # threads = MyThreadFunc(self.insert_data(i*2,i*2+2)) + # threads = MyThreadFunc(self.insert_data(i*2,i*2+2)) threads=threading.Thread(target=self.insert_data, args=(i,i+1)) threads.start() self.TDDnodes.stoptaosd(i+1) @@ -306,13 +306,13 @@ class TDTestCase: return False # self.check3mnode() self.check3mnode() - + stopcount+=1 self.check3mnode() - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.five_dnode_three_mnode(5) @@ -321,4 +321,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop.py b/tests/system-test/6-cluster/5dnode3mnodeStop.py index 46e77710790d73afddfa74cc1e1aeb0ad00a1c5c..09974db8843576ff19846be6f0537650ec781227 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -15,13 +15,13 @@ from test import tdDnodes sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import * +from clusterCommonCheck import * import time import socket import subprocess from multiprocessing import Process - + class TDTestCase: def init(self,conn ,logSql): @@ -69,7 +69,7 @@ class TDTestCase: dnodenumbers=int(dnodenumbers) mnodeNums=int(mnodeNums) dbNumbers = int(dnodenumbers * restartNumber) - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -84,7 +84,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -109,10 +109,10 @@ class TDTestCase: clusterComCheck.checkMnodeStatus(3) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1) - + def stop(self): tdSql.close() tdLog.success(f"{__file__} 
successfully executed") diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py b/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py index fef26333b7f81a9a1a689bb74cf4f1acd8c8e783..9211ed3af8d702d4c2c6b919d3b87656fd104297 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -15,13 +15,13 @@ from test import tdDnodes sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import * +from clusterCommonCheck import * import time import socket import subprocess from multiprocessing import Process - + class TDTestCase: def init(self,conn ,logSql): @@ -69,7 +69,7 @@ class TDTestCase: dnodenumbers=int(dnodenumbers) mnodeNums=int(mnodeNums) dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -84,7 +84,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -99,7 +99,7 @@ class TDTestCase: tdLog.info("check whether 2 mnode status is offline") clusterComCheck.check3mnode2off() # tdSql.error("create user user1 pass '123';") - + tdLog.info("start two follower") tdDnodes[1].starttaosd() tdDnodes[2].starttaosd() @@ -107,10 +107,10 @@ class TDTestCase: clusterComCheck.checkMnodeStatus(mnodeNums) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1) - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py b/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py index f1eb2a4587d6c60b05c4e6f108ada1a2697b1dc7..0a8c94b080328a2c693058f0679c4a412a6bba05 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -16,13 +16,13 @@ from test import tdDnodes sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import * +from clusterCommonCheck import * import time import socket import subprocess from multiprocessing import Process - + class TDTestCase: def init(self,conn ,logSql): @@ -69,7 +69,7 @@ class TDTestCase: dnodenumbers=int(dnodenumbers) mnodeNums=int(mnodeNums) dbNumbers = int(dnodenumbers * restartNumber) - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -84,7 +84,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -96,21 +96,21 @@ class TDTestCase: # restart all taosd - tdDnodes=cluster.dnodes + tdDnodes=cluster.dnodes for i in range(mnodeNums): tdDnodes[i].stoptaosd() for j in range(dnodenumbers): if j != i: 
cluster.checkConnectStatus(j) clusterComCheck.check3mnodeoff(i+1,3) - clusterComCheck.init(cluster.checkConnectStatus(i+1)) + clusterComCheck.init(cluster.checkConnectStatus(i+1)) tdDnodes[i].starttaosd() clusterComCheck.checkMnodeStatus(mnodeNums) - - tdLog.info("Take turns stopping all dnodes ") + + tdLog.info("Take turns stopping all dnodes ") # seperate vnode and mnode in different dnodes. # create database and stable - stopcount =0 + stopcount =0 while stopcount < restartNumber: tdLog.info("first restart loop") for i in range(dnodenumbers): @@ -120,13 +120,13 @@ class TDTestCase: clusterComCheck.checkDnodes(dnodenumbers) clusterComCheck.checkMnodeStatus(mnodeNums) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(5,3,1) - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py b/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py index 59a1a8f697e0991aaaf4291c0e46cf862b11bed1..e5cf7c5254e36ce34925d8197118629ae1d71ecc 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -15,13 +15,13 @@ from test import tdDnodes sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import * +from clusterCommonCheck import * import time import socket import subprocess from multiprocessing import Process - + class TDTestCase: def init(self,conn ,logSql): @@ -69,7 +69,7 @@ class TDTestCase: dnodenumbers=int(dnodenumbers) mnodeNums=int(mnodeNums) dbNumbers = 1 - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -84,7 +84,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -100,20 +100,20 @@ class TDTestCase: # tdLog.info("check whether 2 mnode status is offline") # clusterComCheck.check3mnode2off() # tdSql.error("create user user1 pass '123';") - + tdLog.info("start one mnode" ) tdDnodes[0].starttaosd() clusterComCheck.check3mnodeoff(2) - + clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica']) clusterComCheck.checkDb(dbNumbers,1,'db0') - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1) - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py b/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py index a53930faac92ffb2ce432a84dcaa4fc1d934d697..0d7e4530425f7a8db1e38594dd1544c3c062933c 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log 
import * from util.sql import * @@ -13,7 +13,7 @@ import time import socket import subprocess from multiprocessing import Process -import threading +import threading import time import inspect import ctypes @@ -36,7 +36,7 @@ class TDTestCase: self.host=self.master_dnode.cfgDict["fqdn"] conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"] , config=self.master_dnode.cfgDir) tdSql.init(conn1.cursor()) - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -62,7 +62,7 @@ class TDTestCase: if res == 0: raise ValueError("invalid thread id") elif res != 1: - # """if it returns a number greater than one, you're in trouble, + # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") @@ -73,7 +73,7 @@ class TDTestCase: def insert_data(self,countstart,countstop): # fisrt add data : db\stable\childtable\general table - + for couti in range(countstart,countstop): tdLog.debug("drop database if exists db%d" %couti) tdSql.execute("drop database if exists db%d" %couti) @@ -95,10 +95,10 @@ class TDTestCase: for i in range(4): tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )') - def depoly_cluster(self ,dnodes_nums): + def depoly_cluster(self ,dnodes_nums): testCluster = False - valgrind = 0 + valgrind = 0 hostname = socket.gethostname() dnodes = [] start_port = 6030 @@ -112,7 +112,7 @@ class TDTestCase: dnode.addExtraCfg("monitorPort", 7043) dnode.addExtraCfg("secondEp", f"{hostname}:{start_port_sec}") dnodes.append(dnode) - + self.TDDnodes = MyDnodes(dnodes) self.TDDnodes.init("") self.TDDnodes.setTestCluster(testCluster) @@ -120,11 +120,11 @@ class TDTestCase: self.TDDnodes.stopAll() for dnode in self.TDDnodes.dnodes: self.TDDnodes.deploy(dnode.index,{}) - + for dnode in self.TDDnodes.dnodes: self.TDDnodes.starttaosd(dnode.index) - # create cluster + # create cluster for dnode in self.TDDnodes.dnodes[1:]: # tdLog.debug(dnode.cfgDict) dnode_id = dnode.cfgDict["fqdn"] + ":" +dnode.cfgDict["serverPort"] @@ -133,7 +133,7 @@ class TDTestCase: cmd = f" taos -h {dnode_first_host} -P {dnode_first_port} -s ' create dnode \"{dnode_id} \" ' ;" tdLog.debug(cmd) os.system(cmd) - + time.sleep(2) tdLog.info(" create cluster with %d dnode done! 
" %dnodes_nums) @@ -143,8 +143,8 @@ class TDTestCase: time.sleep(1) statusReadyBumber=0 tdSql.query("show dnodes;") - if tdSql.checkRows(dnodenumber) : - tdLog.debug("dnode is %d nodes"%dnodenumber) + if tdSql.checkRows(dnodenumber) : + tdLog.debug("dnode is %d nodes"%dnodenumber) for i in range(dnodenumber): if tdSql.queryResult[i][4] !='ready' : status=tdSql.queryResult[i][4] @@ -161,15 +161,15 @@ class TDTestCase: else: tdLog.debug("%d mnodes is not ready in 10s "%dnodenumber) return False - + def check3mnode(self): count=0 while count < 10: time.sleep(1) tdSql.query("show mnodes;") - if tdSql.checkRows(3) : - tdLog.debug("mnode is three nodes") + if tdSql.checkRows(3) : + tdLog.debug("mnode is three nodes") if tdSql.queryResult[0][2]=='leader' : if tdSql.queryResult[1][2]=='follower': if tdSql.queryResult[2][2]=='follower': @@ -179,19 +179,19 @@ class TDTestCase: if tdSql.queryResult[1][2]=='leader': if tdSql.queryResult[2][2]=='follower': tdLog.debug("three mnodes is ready in 10s") - break + break elif tdSql.queryResult[0][2]=='follower' : if tdSql.queryResult[1][2]=='follower': if tdSql.queryResult[2][2]=='leader': tdLog.debug("three mnodes is ready in 10s") - break + break count+=1 else: tdLog.debug("three mnodes is not ready in 10s ") return -1 - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,3,'ready') tdSql.checkData(1,1,'%s:6130'%self.host) @@ -221,8 +221,8 @@ class TDTestCase: return -1 tdSql.error("drop mnode on dnode 1;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'offline') tdSql.checkData(0,3,'ready') @@ -249,8 +249,8 @@ class TDTestCase: return -1 tdSql.error("drop mnode on dnode 2;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -278,8 +278,8 @@ class TDTestCase: tdLog.debug("stop mnodes on dnode 3 failed in 10s") return -1 tdSql.error("drop mnode on dnode 3;") - tdSql.query("show mnodes;") - tdSql.checkRows(3) + tdSql.query("show mnodes;") + tdSql.checkRows(3) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -296,8 +296,8 @@ class TDTestCase: tdSql.checkData(4,1,'%s:6430'%self.host) tdSql.checkData(0,4,'ready') tdSql.checkData(4,4,'ready') - tdSql.query("show mnodes;") - tdSql.checkRows(1) + tdSql.query("show mnodes;") + tdSql.checkRows(1) tdSql.checkData(0,1,'%s:6030'%self.host) tdSql.checkData(0,2,'leader') tdSql.checkData(0,3,'ready') @@ -312,13 +312,13 @@ class TDTestCase: tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") tdLog.debug(tdSql.queryResult) - tdLog.debug("stop all of mnode ") + tdLog.debug("stop all of mnode ") - stopcount =0 + stopcount =0 while stopcount <= 2: for i in range(dnodenumber): # threads=[] - # threads = MyThreadFunc(self.insert_data(i*2,i*2+2)) + # threads = MyThreadFunc(self.insert_data(i*2,i*2+2)) threads=threading.Thread(target=self.insert_data, args=((stopcount+i)*2,(i+stopcount)*2+2)) threads.start() self.TDDnodes.stoptaosd(i+1) @@ -344,7 +344,7 @@ class TDTestCase: return taos.connect(host=host, port=int(port), config=config_dir) - def run(self): + def run(self): # tdLog.debug(self.master_dnode.cfgDict) self.buildcluster(5) self.five_dnode_three_mnode(5) @@ -354,4 +354,4 @@ class 
TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py b/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py index e0c91e5ac454b3c2fd9377010c733232b73f009e..c7c45a19c68dafad067de59e1d6f6aeab0e57ab2 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py @@ -2,7 +2,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -15,13 +15,13 @@ from test import tdDnodes sys.path.append("./6-cluster") from clusterCommonCreate import * -from clusterCommonCheck import * +from clusterCommonCheck import * import time import socket import subprocess from multiprocessing import Process - + class TDTestCase: def init(self,conn ,logSql): @@ -69,7 +69,7 @@ class TDTestCase: dnodenumbers=int(dnodenumbers) mnodeNums=int(mnodeNums) dbNumbers = int(dnodenumbers * restartNumber) - + tdLog.info("first check dnode and mnode") tdSql.query("show dnodes;") tdSql.checkData(0,1,'%s:6030'%self.host) @@ -84,7 +84,7 @@ class TDTestCase: tdSql.execute("create mnode on dnode 3") clusterComCheck.checkMnodeStatus(3) - # add some error operations and + # add some error operations and tdLog.info("Confirm the status of the dnode again") tdSql.error("create mnode on dnode 2") tdSql.query("show dnodes;") @@ -93,10 +93,10 @@ class TDTestCase: # restart all taosd tdDnodes=cluster.dnodes - tdLog.info("Take turns stopping all dnodes ") + tdLog.info("Take turns stopping all dnodes ") # seperate vnode and mnode in different dnodes. # create database and stable - stopcount =0 + stopcount =0 while stopcount <= 2: tdLog.info(" restart loop: %d"%stopcount ) for i in range(dnodenumbers): @@ -106,10 +106,10 @@ class TDTestCase: clusterComCheck.checkDnodes(dnodenumbers) clusterComCheck.checkMnodeStatus(3) - def run(self): + def run(self): # print(self.master_dnode.cfgDict) self.fiveDnodeThreeMnode(5,3,1) - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/6-cluster/clusterCommonCheck.py b/tests/system-test/6-cluster/clusterCommonCheck.py index b758e6e71f1c045b090c641b20df10529e2a841a..196b362f45fc7f94b440ba71eaf237be2b03adf0 100644 --- a/tests/system-test/6-cluster/clusterCommonCheck.py +++ b/tests/system-test/6-cluster/clusterCommonCheck.py @@ -48,10 +48,10 @@ class ClusterComCheck: if tdSql.queryResult[i][4] == "ready": status+=1 tdLog.info(status) - + if status == dnodeNumbers: tdLog.success("it find cluster with %d dnodes and check that all cluster dnodes are ready within 30s! 
" %dnodeNumbers) - return True + return True count+=1 time.sleep(1) else: @@ -77,15 +77,15 @@ class ClusterComCheck: def checkDb(self,dbNumbers,restartNumber,dbNameIndex): count=0 alldbNumbers=(dbNumbers*restartNumber)+2 - while count < 5: + while count < 5: query_status=0 for j in range(dbNumbers): for i in range(alldbNumbers): tdSql.query("show databases;") - if "%s_%d"%(dbNameIndex,j) == tdSql.queryResult[i][0] : + if "%s_%d"%(dbNameIndex,j) == tdSql.queryResult[i][0] : if tdSql.queryResult[i][15] == "ready": query_status+=1 - tdLog.debug("check %s_%d that status is ready "%(dbNameIndex,j)) + tdLog.debug("check %s_%d that status is ready "%(dbNameIndex,j)) else: continue # print(query_status) @@ -107,7 +107,7 @@ class ClusterComCheck: for i in range(stableCount): tdSql.query("select count(*) from %s%d"%(stbname,i)) tdSql.checkData(0,0,rowsPerSTable) - return + return def checkMnodeStatus(self,mnodeNums): self.mnodeNums=int(mnodeNums) @@ -118,15 +118,15 @@ class ClusterComCheck: while count < 10: time.sleep(1) tdSql.query("show mnodes;") - if tdSql.checkRows(self.mnodeNums) : + if tdSql.checkRows(self.mnodeNums) : tdLog.success("cluster has %d mnodes" %self.mnodeNums ) if self.mnodeNums == 1: if tdSql.queryResult[0][2]== 'leader' and tdSql.queryResult[0][3]== 'ready' : tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) - return True - count+=1 - elif self.mnodeNums == 3 : + return True + count+=1 + elif self.mnodeNums == 3 : if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' : if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' : if tdSql.queryResult[2][2]=='follower' and tdSql.queryResult[2][3]== 'ready' : @@ -141,9 +141,9 @@ class ClusterComCheck: if tdSql.queryResult[0][2]=='follower' and tdSql.queryResult[0][3]== 'ready' : if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' : tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) - return True + return True count+=1 - elif self.mnodeNums == 2 : + elif self.mnodeNums == 2 : if tdSql.queryResult[0][2]=='leader' and tdSql.queryResult[0][3]== 'ready' : if tdSql.queryResult[1][2]=='follower' and tdSql.queryResult[1][3]== 'ready' : tdLog.success("%d mnodes is ready in 10s"%self.mnodeNums) @@ -157,7 +157,7 @@ class ClusterComCheck: tdLog.debug(tdSql.queryResult) tdLog.exit("cluster of %d mnodes is not ready in 10s " %self.mnodeNums) - + def check3mnodeoff(self,offlineDnodeNo,mnodeNums=3): @@ -224,7 +224,7 @@ class ClusterComCheck: else: tdLog.debug(tdSql.queryResult) tdLog.exit("stop mnodes on dnode %d failed in 10s ") - + diff --git a/tests/system-test/6-cluster/clusterCommonCreate.py b/tests/system-test/6-cluster/clusterCommonCreate.py index 667e5e383ebe1b9a3c6e4630e4b0a20d39e5d77f..299829144e5ddafceacc828614fe1d7a0d2ed98a 100644 --- a/tests/system-test/6-cluster/clusterCommonCreate.py +++ b/tests/system-test/6-cluster/clusterCommonCreate.py @@ -37,23 +37,23 @@ class ClusterComCreate: tdSql.init(conn.cursor()) # tdSql.init(conn.cursor(), logSql) # output sql.txt file - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) - tdSql.query("drop table if exists %s.notifyinfo "%(cdbName)) + tdSql.query("drop table if exists %s.notifyinfo "%(cdbName)) 
tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -68,11 +68,11 @@ class ClusterComCreate: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -82,14 +82,14 @@ class ClusterComCreate: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -142,7 +142,7 @@ class ClusterComCreate: tdLog.debug("create table if not exists %s.%s_%d (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbNameIndex, stbNameIndex,i)) tsql.execute("create table if not exists %s.%s_%d (ts timestamp, c1 int, c2 int, c3 binary(16)) tags(t1 int, t2 binary(32))"%(dbNameIndex, stbNameIndex,i)) tdLog.debug("complete to create %s.%s_%d" %(dbNameIndex, stbNameIndex,i)) - return + return def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1): tsql.execute("use %s" %dbName) @@ -153,14 +153,14 @@ class ClusterComCreate: tagValue = 'beijing' if (i % 2 == 0): tagValue = 'shanghai' - + sql += " %s_%d using %s tags(%d, '%s')"%(ctbPrefix,i,stbName,i+1, tagValue) if (i > 0) and (i%100 == 0): tsql.execute(sql) sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to 
create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -189,7 +189,7 @@ class ClusterComCreate: #print("insert sql:%s"%sql) tsql.execute(sql) tdLog.debug("insert data ............ [OK]") - return + return def insert_data_1(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs): tdLog.debug("start to insert data ............") @@ -235,7 +235,7 @@ class ClusterComCreate: ctbDict[i] = 0 #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfCtb = 0 + rowsOfCtb = 0 while rowsOfCtb < rowsPerTbl: for i in range(ctbNum): sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) @@ -262,7 +262,7 @@ class ClusterComCreate: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) for j in range(rowsPerTbl): @@ -294,7 +294,7 @@ class ClusterComCreate: for i in range(ctbNum): tbName = '%s%s'%(ctbPrefix,i) tdCom.insert_rows(tsql,dbname=paraDict["dbName"],tbname=tbName,start_ts_value=paraDict['startTs'],count=paraDict['rowsPerTbl']) - return + return def threadFunction(self, **paraDict): # create new connector for new tdSql instance in my thread diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py index 14494f1171aaf31d1bac26c2fe8d2fad483f0954..e93b13278ba577789aa4116de903a6300af5e988 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -95,7 +95,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(5): tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) tdSql.query("show stables") @@ -126,7 +126,7 @@ class TDTestCase: return taos.connect(host=host, port=int(port), config=config_dir) - def run(self): + def run(self): self.check_setup_cluster_status() self.create_db_check_vgroups() @@ -135,4 +135,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py index 9a21dab855276ef569a917c36dccb61872aa2d65..7638d8227f471c96f0d70a9e954b93725d347319 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -26,9 +26,9 @@ class TDTestCase: self.dnode_list = {} self.ts = 1483200000000 self.db_name ='testdb' - self.replica = 1 + self.replica = 1 self.vgroups = 2 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 def getBuildPath(self): @@ -101,7 +101,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 
bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(5): tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) tdSql.query("show stables") @@ -145,7 +145,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(tb_nums): sub_tbname = "sub_tb_{}".format(i) tdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) @@ -164,7 +164,7 @@ class TDTestCase: tdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) tdSql.checkRows(tb_nums) - def run(self): + def run(self): self.check_setup_cluster_status() self.create_db_check_vgroups() self.create_db_replica_1_insertdatas(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums) @@ -176,4 +176,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py new file mode 100644 index 0000000000000000000000000000000000000000..02d944b08f6087a3fe3bb71f8874e2d6509bcf98 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py @@ -0,0 +1,202 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: +    def init(self,conn ,logSql): +        tdLog.debug(f"start to execute {__file__}") +        tdSql.init(conn.cursor()) +        self.host = socket.gethostname() +        self.mnode_list = {} +        self.dnode_list = {} +        self.ts = 1483200000000 +        self.db_name ='testdb' +        self.replica = 1 +        self.vgroups = 1 +        self.tb_nums = 10 +        self.row_nums = 1000 +        self.query_times = 500 + +    def getBuildPath(self): +        selfPath = os.path.dirname(os.path.realpath(__file__)) +        if ("community" in selfPath): +            projPath = selfPath[:selfPath.find("community")] +        else: +            projPath = selfPath[:selfPath.find("tests")] + +        for root, dirs, files in os.walk(projPath): +            if ("taosd" in files): +                rootRealPath = os.path.dirname(os.path.realpath(root)) +                if ("packaging" not in rootRealPath): +                    buildPath = root[:len(root) - len("/build/bin")] +                    break +        return buildPath + +    def check_setup_cluster_status(self): +        tdSql.query("show mnodes") +        for mnode in tdSql.queryResult: +            name = mnode[1] +            info = mnode +            self.mnode_list[name] = info + +        tdSql.query("show dnodes") +        for dnode in tdSql.queryResult: +            name = dnode[1] +            info = dnode +            self.dnode_list[name] = info + +        count = 0 +        is_leader = False +        mnode_name = '' +        for k,v in self.mnode_list.items(): +            count +=1 +            # only for 1 mnode +            mnode_name = k + +            if v[2] =='leader': +                is_leader=True + +        if count==1 and is_leader: +            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====") +        else: +            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====") + +        for k ,v in self.dnode_list.items(): +            if k == mnode_name: +                if v[3]==0: +                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) +                else: +                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) +            else: +                continue + +    def create_db_check_vgroups(self): + +        tdSql.execute("drop database if exists test") +        tdSql.execute("create database if not exists test replica 1 duration 300") +        tdSql.execute("use test") +        tdSql.execute( +        '''create table stb1 +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) +        tags (t1 int) +        ''' +        ) +        tdSql.execute( +        ''' +        create table t1 +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) +        ''' +        ) + +        for i in range(5): +            tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) +        tdSql.query("show stables") +        tdSql.checkRows(1) +        tdSql.query("show tables") +        tdSql.checkRows(6) + +        tdSql.query("show test.vgroups;") +        vgroups_infos = {}  # key is id: value is info list +        for vgroup_info in tdSql.queryResult: +            vgroup_id = vgroup_info[0] +            tmp_list = [] +            for role in vgroup_info[3:-4]: +                if role in ['leader','follower']: +                    tmp_list.append(role) +            vgroups_infos[vgroup_id]=tmp_list + +        for k , v in vgroups_infos.items(): +            if len(v) ==1 and v[0]=="leader": +                tdLog.notice(" === create database replica only 1 role leader  check success of vgroup_id {} ======".format(k)) +            else: +                tdLog.exit(" === create database replica only 1 role leader  check fail of vgroup_id {} ======".format(k)) + +    def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): +        newTdSql=tdCom.newTdSql() +        drop_db_sql = "drop database if exists {}".format(dbname) +        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + +        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) +        newTdSql.execute(drop_db_sql) +        newTdSql.execute(create_db_sql) +        newTdSql.execute("use {}".format(dbname)) +        newTdSql.execute( +        '''create table stb1 +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) +        tags (t1 int) +        ''' +        ) +        newTdSql.execute( +        ''' +        create table t1 +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) +        ''' +        ) + +        for i in range(tb_nums): +            sub_tbname = "sub_tb_{}".format(i) +            newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) +            # insert datas about new database + +            for row_num in range(row_nums): +                ts = self.ts + 1000*row_num +                newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + +        tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + +    def check_insert_status(self, newTdSql ,dbname, tb_nums , row_nums): +        # newTdSql=tdCom.newTdSql() +        newTdSql.execute("use {}".format(dbname)) +        newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) +        # tdSql.checkData(0 , 0 , tb_nums*row_nums) +        newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) +        # tdSql.checkRows(tb_nums) + +    def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): +        newTdSql=tdCom.newTdSql() +        for loop_time in range(times): +            tdLog.debug(" === query is going ,this 
is {}_th query === ".format(loop_time)) + self.check_insert_status( newTdSql , db_name, tb_nums , row_nums) + + def run(self): + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + writing.join() + reading.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py index 0b6ab8721a9d53cbdb9c42ea53d386ac44c17008..5d112f435266b9fbf3f6bb0b42ff8482cd7b2686 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -26,9 +26,9 @@ class TDTestCase: self.dnode_list = {} self.ts = 1483200000000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 2 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 def getBuildPath(self): @@ -101,7 +101,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(5): tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) tdSql.query("show stables") @@ -145,7 +145,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(tb_nums): sub_tbname = "sub_tb_{}".format(i) tdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) @@ -164,7 +164,7 @@ class TDTestCase: tdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) tdSql.checkRows(tb_nums) - def run(self): + def run(self): self.check_setup_cluster_status() self.create_db_check_vgroups() self.create_db_replica_3_insertdatas(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums) @@ -176,4 +176,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py new file mode 100644 index 0000000000000000000000000000000000000000..3d01015af66ad99320fd9b671595a2362f214fc8 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py @@ -0,0 +1,428 @@ +# author : wenzhouwww +from ssl import 
ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import datetime +import inspect +import time +import socket +import subprocess +import threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: +    def init(self,conn ,logSql): +        tdLog.debug(f"start to execute {__file__}") +        tdSql.init(conn.cursor()) +        self.host = socket.gethostname() +        self.mnode_list = {} +        self.dnode_list = {} +        self.ts = 1483200000000 +        self.ts_step =1000 +        self.db_name ='testdb' +        self.replica = 3 +        self.vgroups = 1 +        self.tb_nums = 10 +        self.row_nums = 100 +        self.stop_dnode_id = None +        self.loop_restart_times = 5 +        self.current_thread = None +        self.max_restart_time = 10 +        self.try_check_times = 10 + +    def getBuildPath(self): +        selfPath = os.path.dirname(os.path.realpath(__file__)) +        if ("community" in selfPath): +            projPath = selfPath[:selfPath.find("community")] +        else: +            projPath = selfPath[:selfPath.find("tests")] + +        for root, dirs, files in os.walk(projPath): +            if ("taosd" in files): +                rootRealPath = os.path.dirname(os.path.realpath(root)) +                if ("packaging" not in rootRealPath): +                    buildPath = root[:len(root) - len("/build/bin")] +                    break +        return buildPath + +    def check_setup_cluster_status(self): + +        tdSql.query("show mnodes") +        for mnode in tdSql.queryResult: +            name = mnode[1] +            info = mnode +            self.mnode_list[name] = info + +        tdSql.query("show dnodes") +        for dnode in tdSql.queryResult: +            name = dnode[1] +            info = dnode +            self.dnode_list[name] = info + +        count = 0 +        is_leader = False +        mnode_name = '' +        for k,v in self.mnode_list.items(): +            count +=1 +            # only for 1 mnode +            mnode_name = k + +            if v[2] =='leader': +                is_leader=True + +        if count==1 and is_leader: +            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====") +        else: +            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====") + +        for k ,v in self.dnode_list.items(): +            if k == mnode_name: +                if v[3]==0: +                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) +                else: +                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) +            else: +                continue + +    def create_database(self, dbname, replica_num ,vgroup_nums ): +        drop_db_sql = "drop database if exists {}".format(dbname) +        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + +        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) +        tdSql.execute(drop_db_sql) +        tdSql.execute(create_db_sql) +        tdSql.execute("use {}".format(dbname)) + +    def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): +        tdSql.execute("use {}".format(dbname)) +        tdSql.execute( +        '''create table {} +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) +        tags (t1 int) +        '''.format(stablename) +        ) + +        for i in range(tb_nums): +            sub_tbname = "sub_{}_{}".format(stablename,i) +            tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) +            # insert datas about new database + +            for row_num in range(row_nums): +                ts = self.ts + self.ts_step*row_num +                tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") +
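+        # note: the loop above issues one INSERT per row, which keeps failure attribution simple while dnodes are force-stopped; a batched sketch (several value groups per statement, in the style of the batch-oriented helpers in clusterCommonCreate.py) would cut round-trips, e.g.: +        #   values_sql = " ".join(f"({self.ts + self.ts_step*r}, {r} ,{r}, 10 ,1 ,{r} ,{r},true,'bin_{r}','nchar_{r}',now)" for r in range(row_nums))   # sketch only +        #   tdSql.execute(f"insert into {sub_tbname} values " + values_sql)   # sketch only, not part of the test logic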
+ tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) + + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): + + tdSql.execute("use {}".format(dbname)) + + for row_num in range(append_nums): + tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename)) + os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename)) + + def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): + + tdSql.execute("use {}".format(dbname)) + + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + + status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) + + count = 0 + while not status_OK : + if count > self.try_check_times: + os.system("taos -s ' show {}.vgroups; '".format(dbname)) + tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname)) + break + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) + tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) + count += 1 + + + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) + count = 0 + while not status_OK : + if count > self.try_check_times: + os.system("taos -s ' show {}.vgroups;'".format(dbname)) + tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname)) + break + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) + tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) + count += 1 + + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = 
vgroup_info[3:-4] + # print(vgroup_info) + for ind ,role in enumerate(leader_infos): + if role == dnode_role: + # print(ind,leader_infos) + self.stop_dnode_id = leader_infos[ind-1] + break + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self ,newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def _parse_datetime(self,timestr): + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f') + except ValueError: + pass + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S') + except ValueError: + pass + + def mycheckRowCol(self, sql, row, col): + caller = inspect.getframeinfo(inspect.stack()[2][0]) + if row < 0: + args = (caller.filename, caller.lineno, sql, row) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args) + if col < 0: + args = (caller.filename, caller.lineno, sql, row) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args) + if row > tdSql.queryRows: + args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args) + if col > tdSql.queryCols: + args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args) + + def mycheckData(self, sql ,row, col, data): + check_status = True + self.mycheckRowCol(sql ,row, col) + if tdSql.queryResult[row][col] != data: + if tdSql.cursor.istype(col, "TIMESTAMP"): + # suppose user want to check nanosecond timestamp if a longer data passed + if (len(data) >= 28): + if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + else: + if tdSql.queryResult[row][col] == self._parse_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + + if str(tdSql.queryResult[row][col]) == str(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001: + tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + else: + caller = 
inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data) + tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + + check_status = False + + if data is None: + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + elif isinstance(data, str): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + # elif isinstance(data, datetime.date): + # tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + # (sql, row, col, tdSql.queryResult[row][col], data)) + elif isinstance(data, float): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + else: + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % + (sql, row, col, tdSql.queryResult[row][col], data)) + + return check_status + + def mycheckRows(self, sql, expectRows): + check_status = True + if len(tdSql.queryResult) == expectRows: + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows)) + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows) + tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + check_status = False + return check_status + + def force_stop_dnode(self, dnode_id ): + + port = None + for k ,v in self.dnode_list.items(): + if v[0] == dnode_id: + port = k.split(":")[-1] + else: + continue + + if port: + tdLog.notice(" ==== dnode {} will be force stop by kill -9 ====".format(dnode_id)) + psCmd = '''netstat -anp|grep -w LISTEN|grep -w %s |grep -o "LISTEN.*"|awk '{print $2}'|cut -d/ -f1|head -n1''' %(port) + processID = subprocess.check_output( + psCmd, shell=True).decode("utf-8") + ps_kill_taosd = ''' kill -9 {} '''.format(processID) + # print(ps_kill_taosd) + os.system(ps_kill_taosd) + else : + tdLog.exit(" ==== port of dnode {} not found ====".format(dnode_id)) + + def stop_All(self): + + tdDnodes = cluster.dnodes + # newTdSql=tdCom.newTdSql() + # ==== stop all dnode ===== + for k ,v in self.dnode_list.items(): + dnode_id = v[0] + # tdDnodes[dnode_id-1].stoptaosd() + self.force_stop_dnode(dnode_id) + # self.wait_stop_dnode_OK(newTdSql) + + def start_All(self): + tdDnodes = cluster.dnodes + # newTdSql=tdCom.newTdSql() + for k ,v in self.dnode_list.items(): + dnode_id = v[0] + start = time.time() + tdDnodes[dnode_id-1].starttaosd() + # self.wait_start_dnode_OK(newTdSql) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + + def stop_All_dnodes_check_datas(self): + + # stop follower and insert datas , update tables and create new stables + self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1) + self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums ) + self.check_insert_rows(self.db_name ,'stb1' ,tb_nums = self.tb_nums , row_nums= self.row_nums ,append_rows=0) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + for i in range(self.loop_restart_times): + # begin to stop All taosd + self.stop_All() + # begin to start All taosd + self.start_All() + + tdLog.debug(" ==== cluster has restart , this is {}_th 
restart cluster ==== ".format(i)) + +            self.check_insert_rows(self.db_name ,'stb1' ,tb_nums = self.tb_nums , row_nums= self.row_nums ,append_rows=0) +            os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + +            self.append_rows_of_exists_tables(self.db_name ,"stb1" , "sub_stb1_0" , 100 ) +            os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + +    def run(self): + +        # basic insert and check of cluster +        self.check_setup_cluster_status() +        self.stop_All_dnodes_check_datas() + + + + +    def stop(self): +        tdSql.close() +        tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py new file mode 100644 index 0000000000000000000000000000000000000000..3649617c21e9c82e9b24ef2103d88517ef2695e1 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py @@ -0,0 +1,204 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: +    def init(self,conn ,logSql): +        tdLog.debug(f"start to execute {__file__}") +        tdSql.init(conn.cursor()) +        self.host = socket.gethostname() +        self.mnode_list = {} +        self.dnode_list = {} +        self.ts = 1483200000000 +        self.db_name ='testdb' +        self.replica = 3 +        self.vgroups = 1 +        self.tb_nums = 10 +        self.row_nums = 1000 +        self.query_times = 1000 + +    def getBuildPath(self): +        selfPath = os.path.dirname(os.path.realpath(__file__)) +        if ("community" in selfPath): +            projPath = selfPath[:selfPath.find("community")] +        else: +            projPath = selfPath[:selfPath.find("tests")] + +        for root, dirs, files in os.walk(projPath): +            if ("taosd" in files): +                rootRealPath = os.path.dirname(os.path.realpath(root)) +                if ("packaging" not in rootRealPath): +                    buildPath = root[:len(root) - len("/build/bin")] +                    break +        return buildPath + +    def check_setup_cluster_status(self): +        tdSql.query("show mnodes") +        for mnode in tdSql.queryResult: +            name = mnode[1] +            info = mnode +            self.mnode_list[name] = info + +        tdSql.query("show dnodes") +        for dnode in tdSql.queryResult: +            name = dnode[1] +            info = dnode +            self.dnode_list[name] = info + +        count = 0 +        is_leader = False +        mnode_name = '' +        for k,v in self.mnode_list.items(): +            count +=1 +            # only for 1 mnode +            mnode_name = k + +            if v[2] =='leader': +                is_leader=True + +        if count==1 and is_leader: +            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====") +        else: +            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====") + +        for k ,v in self.dnode_list.items(): +            if k == mnode_name: +                if v[3]==0: +                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) +                else: +                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) +            else: +                continue + +    def create_db_check_vgroups(self): + +        tdSql.execute("drop database if exists test") +        tdSql.execute("create database if not exists test replica 1 duration 
300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + if row_num %100 ==0: + tdLog.info(" === writing is going now === ") + ts = self.ts + 1000*row_num + newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + + def check_insert_status(self, newTdSql ,dbname, tb_nums , row_nums): + + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) + + def run(self): + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , 
self.tb_nums , self.row_nums)) +        writing.start() +        tdSql.query(" show {}.stables ".format(self.db_name)) +        while not tdSql.queryResult: +            print(tdSql.queryResult) +            time.sleep(0.1) +            tdSql.query(" show {}.stables ".format(self.db_name)) + +        reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) +        reading.start() + +        writing.join() +        reading.join() + +    def stop(self): +        tdSql.close() +        tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py new file mode 100644 index 0000000000000000000000000000000000000000..db05eca9ce52d93cf70fd0f98d29f0ecb6cb19e5 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py @@ -0,0 +1,355 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: +    def init(self,conn ,logSql): +        tdLog.debug(f"start to execute {__file__}") +        tdSql.init(conn.cursor()) +        self.host = socket.gethostname() +        self.mnode_list = {} +        self.dnode_list = {} +        self.ts = 1483200000000 +        self.db_name ='testdb' +        self.replica = 3 +        self.vgroups = 1 +        self.tb_nums = 10 +        self.row_nums = 500 +        self.max_restart_time = 20 +        self.restart_server_times = 10 +        self.dnode_index = 0 + +    def getBuildPath(self): +        selfPath = os.path.dirname(os.path.realpath(__file__)) +        if ("community" in selfPath): +            projPath = selfPath[:selfPath.find("community")] +        else: +            projPath = selfPath[:selfPath.find("tests")] + +        for root, dirs, files in os.walk(projPath): +            if ("taosd" in files): +                rootRealPath = os.path.dirname(os.path.realpath(root)) +                if ("packaging" not in rootRealPath): +                    buildPath = root[:len(root) - len("/build/bin")] +                    break +        return buildPath + +    def check_setup_cluster_status(self): +        tdSql.query("show mnodes") +        for mnode in tdSql.queryResult: +            name = mnode[1] +            info = mnode +            self.mnode_list[name] = info + +        tdSql.query("show dnodes") +        for dnode in tdSql.queryResult: +            name = dnode[1] +            info = dnode +            self.dnode_list[name] = info + +        count = 0 +        is_leader = False +        mnode_name = '' +        for k,v in self.mnode_list.items(): +            count +=1 +            # only for 1 mnode +            mnode_name = k + +            if v[2] =='leader': +                is_leader=True + +        if count==1 and is_leader: +            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====") +        else: +            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====") + +        for k ,v in self.dnode_list.items(): +            if k == mnode_name: +                if v[3]==0: +                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) +                else: +                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) +            else: +                continue + +    def create_db_check_vgroups(self): + +        tdSql.execute("drop database if exists test") +        tdSql.execute("create database if not exists test replica 
1 duration 300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + if row_num % (int(row_nums*0.1)) == 0 : + tdLog.notice( " === database {} writing records {} rows".format(dbname , row_num ) ) + ts = self.ts + 1000*row_num + + newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + def _get_stop_dnode_id(self): + + + dnode_lists = list(set(self.dnode_list.keys()) -set(self.mnode_list.keys())) + # print(dnode_lists) + self.stop_dnode_id = self.dnode_list[dnode_lists[self.dnode_index % 3]][0] + + self.dnode_index += 1 + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is 
{}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def get_leader_infos(self , newTdSql , dbname): + + # newTdSql=tdCom.newTdSql() + newTdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = newTdSql.queryResult + + leader_infos = set() + for vgroup_info in vgroup_infos: + leader_infos.add(vgroup_info[3:-4]) + + return leader_infos + + def check_revote_leader_success(self, dbname, before_leader_infos , after_leader_infos): + + check_status = False + vote_act = set(set(after_leader_infos)-set(before_leader_infos)) + if not vote_act: + print("=======before_revote_leader_infos ======\n" , before_leader_infos) + print("=======after_revote_leader_infos ======\n" , after_leader_infos) + tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====") + else: + for vgroup_info in vote_act: + for ind , role in enumerate(vgroup_info): + if role==self.stop_dnode_id: + + if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: + tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1])) + check_status = True + elif vgroup_info[ind+1] !="offline": + tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id)) + else: + continue + break + return check_status + + + def check_insert_status(self, newTdSql , dbname, tb_nums , row_nums): + + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) + + + def loop_restart_follower_constantly(self, times , db_name): + + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() + + for loop_time in range(times): + + self.stop_dnode_id = self._get_stop_dnode_id() + + # print(self.stop_dnode_id) + # begin stop dnode + + # before_leader_infos = self.get_leader_infos( newTdSql ,db_name) + tdDnodes[self.stop_dnode_id-1].stoptaosd() + self.wait_stop_dnode_OK(newTdSql) + + # start = time.time() + # # get leader info after stop + # after_leader_infos = self.get_leader_infos(newTdSql , db_name) + + # revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) + + # # append rows of stablename when dnode stop make sure revote leaders + + # while not revote_status: + # after_leader_infos = self.get_leader_infos(newTdSql , db_name) + # revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) + + # end = time.time() + # time_cost 
= end - start +            # tdLog.notice(" ==== revote leader of database {} cost time {}  ====".format(db_name , time_cost)) + +            tdLog.notice(" === this is {}_th restart taosd === ".format(loop_time)) + +            # begin start dnode +            start = time.time() +            tdDnodes[self.stop_dnode_id-1].starttaosd() +            self.wait_start_dnode_OK(newTdSql) +            time.sleep(5) +            end = time.time() +            time_cost = int(end -start) +            if time_cost > self.max_restart_time: +                tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + +    def run(self): + +        self.check_setup_cluster_status() +        self.create_db_check_vgroups() + +        # start writing constantly +        writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) +        writing.start() +        tdSql.query(" show {}.stables ".format(self.db_name)) +        while not tdSql.queryResult: +            print(tdSql.queryResult) +            time.sleep(0.1) +            tdSql.query(" show {}.stables ".format(self.db_name)) + +        restart_servers = threading.Thread(target = self.loop_restart_follower_constantly, args = (self.restart_server_times ,self.db_name)) +        restart_servers.start() + +        # reading = threading.Thread(target = self.loop_query_constantly, args=(1000,self.db_name , self.tb_nums , self.row_nums)) +        # reading.start() + +        writing.join() +        # reading.join() +        restart_servers.join() + +    def stop(self): +        tdSql.close() +        tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd5ec7d462ad3a37e73ef7167cb22144fca6149 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py @@ -0,0 +1,316 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: +    def init(self,conn ,logSql): +        tdLog.debug(f"start to execute {__file__}") +        tdSql.init(conn.cursor()) +        self.host = socket.gethostname() +        self.mnode_list = {} +        self.dnode_list = {} +        self.ts = 1483200000000 +        self.db_name ='testdb' +        self.replica = 3 +        self.vgroups = 10 +        self.tb_nums = 10 +        self.row_nums = 100 +        self.max_restart_time = 20 +        self.restart_server_times = 5 +        self.query_times = 100 + +    def getBuildPath(self): +        selfPath = os.path.dirname(os.path.realpath(__file__)) +        if ("community" in selfPath): +            projPath = selfPath[:selfPath.find("community")] +        else: +            projPath = selfPath[:selfPath.find("tests")] + +        for root, dirs, files in os.walk(projPath): +            if ("taosd" in files): +                rootRealPath = os.path.dirname(os.path.realpath(root)) +                if ("packaging" not in rootRealPath): +                    buildPath = root[:len(root) - len("/build/bin")] +                    break +        return buildPath + +    def check_setup_cluster_status(self): +        tdSql.query("show mnodes") +        for mnode in tdSql.queryResult: +            name = mnode[1] +            info = mnode +            self.mnode_list[name] = info + +        tdSql.query("show dnodes") +        for dnode in tdSql.queryResult: +            name = dnode[1] +            info = dnode +            self.dnode_list[name] = info + +        count = 0 +        is_leader = False +        mnode_name = '' +        for k,v in self.mnode_list.items(): +            count +=1 +            # only for 1 mnode +            mnode_name = k + +            if v[2] =='leader': +                is_leader=True + +        if count==1 and is_leader: +            tdLog.notice("===== deploy cluster success with 1 mnode as leader =====") +        else: +            tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====") + +        for k ,v in self.dnode_list.items(): +            if k == mnode_name: +                if v[3]==0: +                    tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) +                else: +                    tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) +            else: +                continue + +    def create_db_check_vgroups(self): + +        tdSql.execute("drop database if exists test") +        tdSql.execute("create database if not exists test replica 1 duration 300") +        tdSql.execute("use test") +        tdSql.execute( +        '''create table stb1 +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) +        tags (t1 int) +        ''' +        ) +        tdSql.execute( +        ''' +        create table t1 +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) +        ''' +        ) + +        for i in range(5): +            tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) +        tdSql.query("show stables") +        tdSql.checkRows(1) +        tdSql.query("show tables") +        tdSql.checkRows(6) + +        tdSql.query("show test.vgroups;") +        vgroups_infos = {}  # key is id: value is info list +        for vgroup_info in tdSql.queryResult: +            vgroup_id = vgroup_info[0] +            tmp_list = [] +            for role in vgroup_info[3:-4]: +                if role in ['leader','follower']: +                    tmp_list.append(role) +            vgroups_infos[vgroup_id]=tmp_list + +        for k , v in vgroups_infos.items(): +            if len(v) ==1 and v[0]=="leader": +                tdLog.notice(" === create database replica only 1 role leader  check success of vgroup_id {} ======".format(k)) +            else: +                tdLog.exit(" === create database replica only 1 role leader  check fail of vgroup_id {} ======".format(k)) + +    def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): +        newTdSql=tdCom.newTdSql() +        drop_db_sql = "drop database if exists {}".format(dbname) +        create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + +        tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) +        newTdSql.execute(drop_db_sql) +        newTdSql.execute(create_db_sql) +        newTdSql.execute("use {}".format(dbname)) +        newTdSql.execute( +        '''create table stb1 +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) +        tags (t1 int) +        ''' +        ) +        newTdSql.execute( +        ''' +        create table t1 +        (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) +        ''' +        ) + +        for i in range(tb_nums): +            sub_tbname = "sub_tb_{}".format(i) +            newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) +            # insert datas about new database + +            for row_num in range(row_nums): +                if row_num % (int(row_nums*0.1)) == 0 : +                    tdLog.notice( " === database {} writing records {} rows".format(dbname , row_num ) ) +                ts = self.ts + 1000*row_num +                newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 
,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] + # print(vgroup_info) + for ind ,role in enumerate(leader_infos): + if role == dnode_role: + # print(ind,leader_infos) + self.stop_dnode_id = leader_infos[ind-1] + break + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def check_insert_status(self, newTdSql , dbname, tb_nums , row_nums): + + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) + + + def loop_restart_follower_constantly(self, times , db_name): + + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() + + for loop_time in range(times): + + self.stop_dnode_id = self._get_stop_dnode_id(db_name , "follower") + + print(self.stop_dnode_id) + # begin stop dnode + start = time.time() + tdDnodes[self.stop_dnode_id-1].stoptaosd() + self.wait_stop_dnode_OK(newTdSql) + + tdLog.notice(" === this is {}_th restart taosd === ".format(loop_time)) + + # begin start dnode + tdDnodes[self.stop_dnode_id-1].starttaosd() + self.wait_start_dnode_OK(newTdSql) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , 
please check ====".format(self.stop_dnode_id)) + + + + def run(self): + + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + restart_servers = threading.Thread(target = self.loop_restart_follower_constantly, args = (self.restart_server_times ,self.db_name)) + restart_servers.start() + + reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + writing.join() + reading.join() + restart_servers.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py new file mode 100644 index 0000000000000000000000000000000000000000..cbb007d961cae56d62d6ae1cd789af0b768d1807 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py @@ -0,0 +1,369 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 10 + self.tb_nums = 10 + self.row_nums = 100 + self.max_restart_time = 20 + self.restart_server_times = 10 + self.query_times = 100 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def check_setup_cluster_status(self): + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader 
=====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_db_check_vgroups(self): + + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + if row_num % (int(row_nums*0.1)) == 0 : + tdLog.notice( " === database {} writing records {} rows".format(dbname , row_num ) ) + ts = self.ts + 1000*row_num + newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show 
{}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] + # print(vgroup_info) + for ind ,role in enumerate(leader_infos): + if role == dnode_role: + # print(ind,leader_infos) + self.stop_dnode_id = leader_infos[ind-1] + break + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def get_leader_infos(self , newTdSql , dbname): + + # newTdSql=tdCom.newTdSql() + newTdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = newTdSql.queryResult + + leader_infos = set() + for vgroup_info in vgroup_infos: + leader_infos.add(vgroup_info[3:-4]) + + return leader_infos + + def check_revote_leader_success(self, dbname, before_leader_infos , after_leader_infos): + + check_status = False + vote_act = set(set(after_leader_infos)-set(before_leader_infos)) + if not vote_act: + print("=======before_revote_leader_infos ======\n" , before_leader_infos) + print("=======after_revote_leader_infos ======\n" , after_leader_infos) + tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====") + else: + for vgroup_info in vote_act: + for ind , role in enumerate(vgroup_info): + if role==self.stop_dnode_id: + + if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: + tdLog.notice(" === revote leader ok , leader is {} now ====".format(vgroup_info[list(vgroup_info).index("leader")-1])) + check_status = True + elif vgroup_info[ind+1] !="offline": + tdLog.notice(" === dnode {} should be offline ".format(self.stop_dnode_id)) + else: + continue + break + return check_status + + + def check_insert_status(self, newTdSql , dbname, tb_nums , row_nums): + + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + + self.check_insert_status( newTdSql ,db_name, 
tb_nums , row_nums) + + + def loop_restart_leader_constantly(self, times , db_name): + + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() + + for loop_time in range(times): + + self.stop_dnode_id = self._get_stop_dnode_id(db_name , "leader") + + # print(self.stop_dnode_id) + # begin stop dnode + start = time.time() + + before_leader_infos = self.get_leader_infos( newTdSql ,db_name) + tdDnodes[self.stop_dnode_id-1].stoptaosd() + self.wait_stop_dnode_OK(newTdSql) + + start = time.time() + # get leader info after stop + after_leader_infos = self.get_leader_infos(newTdSql , db_name) + + revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) + + # wait here until the leader of every stopped vgroup has been re-voted + + while not revote_status: + after_leader_infos = self.get_leader_infos(newTdSql , db_name) + revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) + + end = time.time() + time_cost = end - start + tdLog.notice(" ==== revote leader of database {} cost {} seconds ====".format(db_name , time_cost)) + + tdLog.notice(" === this is {}_th restart taosd === ".format(loop_time)) + + # begin start dnode + tdDnodes[self.stop_dnode_id-1].starttaosd() + self.wait_start_dnode_OK(newTdSql) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + + def run(self): + + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + restart_servers = threading.Thread(target = self.loop_restart_leader_constantly, args = (self.restart_server_times ,self.db_name)) + restart_servers.start() + + reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + writing.join() + reading.join() + restart_servers.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py new file mode 100644 index 0000000000000000000000000000000000000000..63c4942c9e12b1b00cd37484abfe7d07c663cce3 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py @@ -0,0 +1,407 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import datetime +import inspect +import time +import socket +import subprocess +import threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to execute {__file__}") + 
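# editor's note: the settings just below size this stop-all-dnodes scenario , a replica-3 database on a single vgroup with 10 child tables x 100 rows ; the whole cluster is stopped and restarted loop_restart_times times + # max_restart_time (seconds) bounds how long one dnode restart may take before the case fails , and try_check_times caps the retries used when re-checking row counts after a restart +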
tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.ts_step =1000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 1 + self.tb_nums = 10 + self.row_nums = 100 + self.stop_dnode_id = None + self.loop_restart_times = 5 + self.current_thread = None + self.max_restart_time = 10 + self.try_check_times = 10 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def check_setup_cluster_status(self): + + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== depoly cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== depoly cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== depoly cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== depoly cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_database(self, dbname, replica_num ,vgroup_nums ): + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + tdSql.execute(drop_db_sql) + tdSql.execute(create_db_sql) + tdSql.execute("use {}".format(dbname)) + + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): + tdSql.execute("use {}".format(dbname)) + tdSql.execute( + '''create table {} + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + '''.format(stablename) + ) + + for i in range(tb_nums): + sub_tbname = "sub_{}_{}".format(stablename,i) + tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) + # insert datas about new database + + for row_num in range(row_nums): + ts = self.ts + self.ts_step*row_num + tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) + + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): + + tdSql.execute("use {}".format(dbname)) + + for row_num in range(append_nums): + tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} 
,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + tdLog.notice(" ==== append new rows of table {} belongs to stable {} execute end =====".format(tbname,stablename)) + os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename)) + + def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): + + tdSql.execute("use {}".format(dbname)) + + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + + status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) + + count = 0 + while not status_OK : + if count > self.try_check_times: + os.system("taos -s ' show {}.vgroups; '".format(dbname)) + tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname)) + break + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) + tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) + count += 1 + + + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) + count = 0 + while not status_OK : + if count > self.try_check_times: + os.system("taos -s ' show {}.vgroups;'".format(dbname)) + tdLog.exit(" ==== check insert rows failed after {} try check {} times of database {}".format(count , self.try_check_times ,dbname)) + break + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + while not tdSql.queryResult: + time.sleep(0.1) + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) + status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) + tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) + count += 1 + + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] + # print(vgroup_info) + for ind ,role in enumerate(leader_infos): + if role == dnode_role: + # print(ind,leader_infos) + self.stop_dnode_id = leader_infos[ind-1] + break + + return self.stop_dnode_id + + def wait_stop_dnode_OK(self ,newTdSql ): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + 
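# editor's note: this helper polls `show dnodes` , reading the dnode id from column 0 and its status ("ready" / "offline") from column 4 of each row ; only the row of the dnode that was just stopped matters here +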
if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="offline": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) + + def wait_start_dnode_OK(self , newTdSql): + + def _get_status(): + # newTdSql=tdCom.newTdSql() + status = "" + newTdSql.query("show dnodes") + dnode_infos = newTdSql.queryResult + for dnode_info in dnode_infos: + id = dnode_info[0] + dnode_status = dnode_info[4] + if id == self.stop_dnode_id: + status = dnode_status + break + return status + + status = _get_status() + while status !="ready": + time.sleep(0.1) + status = _get_status() + # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) + tdLog.notice("==== stop_dnode has restart , id is {} ====".format(self.stop_dnode_id)) + + def _parse_datetime(self,timestr): + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S.%f') + except ValueError: + pass + try: + return datetime.datetime.strptime(timestr, '%Y-%m-%d %H:%M:%S') + except ValueError: + pass + + def mycheckRowCol(self, sql, row, col): + caller = inspect.getframeinfo(inspect.stack()[2][0]) + if row < 0: + args = (caller.filename, caller.lineno, sql, row) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is smaller than zero" % args) + if col < 0: + args = (caller.filename, caller.lineno, sql, row) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is smaller than zero" % args) + if row > tdSql.queryRows: + args = (caller.filename, caller.lineno, sql, row, tdSql.queryRows) + tdLog.exit("%s(%d) failed: sql:%s, row:%d is larger than queryRows:%d" % args) + if col > tdSql.queryCols: + args = (caller.filename, caller.lineno, sql, col, tdSql.queryCols) + tdLog.exit("%s(%d) failed: sql:%s, col:%d is larger than queryCols:%d" % args) + + def mycheckData(self, sql ,row, col, data): + check_status = True + self.mycheckRowCol(sql ,row, col) + if tdSql.queryResult[row][col] != data: + if tdSql.cursor.istype(col, "TIMESTAMP"): + # suppose user want to check nanosecond timestamp if a longer data passed + if (len(data) >= 28): + if pd.to_datetime(tdSql.queryResult[row][col]) == pd.to_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + else: + if tdSql.queryResult[row][col] == self._parse_datetime(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + + if str(tdSql.queryResult[row][col]) == str(data): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + elif isinstance(data, float) and abs(tdSql.queryResult[row][col] - data) <= 0.000001: + tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" % + (sql, row, col, tdSql.queryResult[row][col], data)) + return + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, row, col, tdSql.queryResult[row][col], data) + tdLog.info("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args) + + check_status = False + + if data is None: + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + elif isinstance(data, str): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, 
tdSql.queryResult[row][col], data)) + # elif isinstance(data, datetime.date): + # tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + # (sql, row, col, tdSql.queryResult[row][col], data)) + elif isinstance(data, float): + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" % + (sql, row, col, tdSql.queryResult[row][col], data)) + else: + tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" % + (sql, row, col, tdSql.queryResult[row][col], data)) + + return check_status + + def mycheckRows(self, sql, expectRows): + check_status = True + if len(tdSql.queryResult) == expectRows: + tdLog.info("sql:%s, queryRows:%d == expect:%d" % (sql, len(tdSql.queryResult), expectRows)) + return True + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, len(tdSql.queryResult), expectRows) + tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) + check_status = False + return check_status + + def stop_All(self): + + tdDnodes = cluster.dnodes + # newTdSql=tdCom.newTdSql() + # ==== stop all dnode ===== + for k ,v in self.dnode_list.items(): + dnode_id = v[0] + tdDnodes[dnode_id-1].stoptaosd() + # self.wait_stop_dnode_OK(newTdSql) + + def start_All(self): + tdDnodes = cluster.dnodes + # newTdSql=tdCom.newTdSql() + for k ,v in self.dnode_list.items(): + dnode_id = v[0] + start = time.time() + tdDnodes[dnode_id-1].starttaosd() + # self.wait_start_dnode_OK(newTdSql) + end = time.time() + time_cost = int(end -start) + if time_cost > self.max_restart_time: + tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) + + + + def stop_All_dnodes_check_datas(self): + + # stop follower and insert datas , update tables and create new stables + self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1) + self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums ) + self.check_insert_rows(self.db_name ,'stb1' ,tb_nums = self.tb_nums , row_nums= self.row_nums ,append_rows=0) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + for i in range(self.loop_restart_times): + # begin to stop All taosd + self.stop_All() + # begin to start All taosd + self.start_All() + + tdLog.debug(" ==== cluster has restart , this is {}_th restart cluster ==== ".format(i)) + + self.check_insert_rows(self.db_name ,'stb1' ,tb_nums = self.tb_nums , row_nums= self.row_nums ,append_rows=0) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + self.append_rows_of_exists_tables(self.db_name ,"stb1" , "sub_stb1_0" , 100 ) + os.system(" taos -s ' select count(*) from {}.{} ;'".format(self.db_name ,'stb1' )) + + + def run(self): + + # basic insert and check of cluster + self.check_setup_cluster_status() + self.stop_All_dnodes_check_datas() + + + + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py index 3d6b548bddb1edf9bcdf9bc8febadcc74f1ab8e3..c608c93f5e4e07c9bb6c2872cc50748c45c85438 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py +++ 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -30,9 +30,9 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None self.loop_restart_times = 5 @@ -110,7 +110,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(5): tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) tdSql.query("show stables") @@ -142,7 +142,7 @@ class TDTestCase: tdSql.execute(drop_db_sql) tdSql.execute(create_db_sql) tdSql.execute("use {}".format(dbname)) - + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): tdSql.execute("use {}".format(dbname)) tdSql.execute( @@ -151,7 +151,7 @@ class TDTestCase: tags (t1 int) '''.format(stablename) ) - + for i in range(tb_nums): sub_tbname = "sub_{}_{}".format(stablename,i) tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) @@ -162,11 +162,11 @@ class TDTestCase: tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) - + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): - + tdSql.execute("use {}".format(dbname)) - + for row_num in range(append_nums): tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -174,9 +174,9 @@ class TDTestCase: os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename)) def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): - + tdSql.execute("use {}".format(dbname)) - + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: @@ -184,8 +184,8 @@ class TDTestCase: tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) - - count = 0 + + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups; '".format(dbname)) @@ -199,14 +199,14 @@ class TDTestCase: status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) tdLog.debug(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) count += 1 - + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: time.sleep(0.1) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) - count = 0 + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups;'".format(dbname)) @@ -220,25 +220,43 @@ class 
TDTestCase: status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) tdLog.debug(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + + + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: - leader_infos = vgroup_info[3:-4] + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): + def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -249,7 +267,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -258,10 +276,11 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {} ====".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -271,7 +290,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="ready": @@ -365,29 +384,34 @@ class TDTestCase: tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) check_status = False return check_status - + def sync_run_case(self): # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes + newTdSql = tdCom.newTdSql() for loop in range(self.loop_restart_times): db_name = "sync_db_{}".format(loop) stablename = 'stable_{}'.format(loop) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) - self.stop_dnode_id = self._get_stop_dnode_id(db_name) + + self.stop_dnode_id = self._get_stop_dnode_id(db_name , "follower") + # check rows of datas - + self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0) - - # begin stop dnode + + # begin stop dnode start = time.time() tdDnodes[self.stop_dnode_id-1].stoptaosd() + - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) + + + # append rows of stablename when dnode stop - # append rows of stablename when dnode stop - tbname = "sub_{}_{}".format(stablename , 0) tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id)) self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 ) @@ -400,26 +424,26 @@ class TDTestCase: tdLog.notice(" ==== check new 
stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - # begin start dnode + # begin start dnode tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - - # create new stables again + + # create new stables again tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - + def unsync_run_case(self): def _restart_dnode_of_db_unsync(dbname): start = time.time() tdDnodes=cluster.dnodes - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + self.stop_dnode_id = self._get_stop_dnode_id(dbname ,"follower") # begin restart dnode tdDnodes[self.stop_dnode_id-1].stoptaosd() self.wait_stop_dnode_OK() @@ -427,18 +451,18 @@ class TDTestCase: self.wait_start_dnode_OK() end = time.time() time_cost = int(end-start) - + if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - + def _create_threading(dbname): self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,)) return self.current_thread - + ''' - in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive + in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive ''' tdDnodes=cluster.dnodes for loop in range(self.loop_restart_times): @@ -449,7 +473,7 @@ class TDTestCase: tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name)) - # create sync threading and start it + # create sync threading and start it self.current_thread = _create_threading(db_name) self.current_thread.start() @@ -468,7 +492,7 @@ class TDTestCase: tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - # create new stables again + # create new stables again tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) @@ -477,7 +501,7 @@ class TDTestCase: self.current_thread.join() - def run(self): + def run(self): # basic insert and check of cluster self.check_setup_cluster_status() @@ -485,7 +509,7 @@ class TDTestCase: self.sync_run_case() # self.unsync_run_case() - + def stop(self): @@ -493,4 +517,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git 
a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py index 8ec634987980dd445bb206c4ba846dc7c95e0738..25c26c7288efdb20d1156a09e325e151fd176f62 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -30,9 +30,9 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None self.loop_restart_times = 5 @@ -110,7 +110,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(5): tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) tdSql.query("show stables") @@ -142,7 +142,7 @@ class TDTestCase: tdSql.execute(drop_db_sql) tdSql.execute(create_db_sql) tdSql.execute("use {}".format(dbname)) - + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): tdSql.execute("use {}".format(dbname)) tdSql.execute( @@ -151,7 +151,7 @@ class TDTestCase: tags (t1 int) '''.format(stablename) ) - + for i in range(tb_nums): sub_tbname = "sub_{}_{}".format(stablename,i) tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) @@ -162,11 +162,11 @@ class TDTestCase: tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) - + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): - + tdSql.execute("use {}".format(dbname)) - + for row_num in range(append_nums): tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -174,9 +174,9 @@ class TDTestCase: os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename)) def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): - + tdSql.execute("use {}".format(dbname)) - + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: @@ -184,8 +184,8 @@ class TDTestCase: tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) - - count = 0 + + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups; '".format(dbname)) @@ -199,14 +199,14 @@ class TDTestCase: status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) count += 1 - + tdSql.query("select distinct tbname from 
{}.{}".format(dbname,stablename)) while not tdSql.queryResult: time.sleep(0.1) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) - count = 0 + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups;'".format(dbname)) @@ -220,26 +220,42 @@ class TDTestCase: status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - - def _get_stop_dnode_id(self,dbname): + + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: - leader_infos = vgroup_info[3:-4] + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -250,7 +266,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -259,10 +275,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): - + def wait_start_dnode_OK(self ,newTdSql): + def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -272,7 +288,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="ready": @@ -366,29 +382,33 @@ class TDTestCase: tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) check_status = False return check_status - + def sync_run_case(self): # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes + newTdSql=tdCom.newTdSql() for loop in range(self.loop_restart_times): db_name = "sync_db_{}".format(loop) stablename = 'stable_{}'.format(loop) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) - self.stop_dnode_id = self._get_stop_dnode_id(db_name) + + self.stop_dnode_id = self._get_stop_dnode_id(db_name ,"follower") + # print("dnode_id:" , self.stop_dnode_id ) # check rows of datas - + self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0) - - # begin stop dnode + + # begin stop dnode start = time.time() tdDnodes[self.stop_dnode_id-1].stoptaosd() - 
self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) + + + # append rows of stablename when dnode stop - # append rows of stablename when dnode stop - tbname = "sub_{}_{}".format(stablename , 0) tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id)) self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 ) @@ -401,45 +421,47 @@ class TDTestCase: tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - # begin start dnode + # begin start dnode tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - - # create new stables again + + # create new stables again tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - + def unsync_run_case(self): def _restart_dnode_of_db_unsync(dbname): + newTdSql=tdCom.newTdSql() start = time.time() tdDnodes=cluster.dnodes - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + self.stop_dnode_id = self._get_stop_dnode_id(dbname,"follower") + # print("dnode_id:" , self.stop_dnode_id ) # begin restart dnode tdDnodes[self.stop_dnode_id-1].stoptaosd() - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) - + if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - + def _create_threading(dbname): self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,)) return self.current_thread - + ''' - in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive + in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive ''' tdDnodes=cluster.dnodes for loop in range(self.loop_restart_times): @@ -450,7 +472,7 @@ class TDTestCase: tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name)) - # create sync threading and start it + # create sync threading and start it self.current_thread = _create_threading(db_name) self.current_thread.start() @@ -469,7 +491,7 @@ class TDTestCase: tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - # create new stables again + # create new stables again tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) tdLog.notice(" ==== check new stable {} when dnode {} restart 
====".format('new_stb2' , self.stop_dnode_id)) @@ -478,7 +500,7 @@ class TDTestCase: self.current_thread.join() - def run(self): + def run(self): # basic insert and check of cluster self.check_setup_cluster_status() @@ -486,7 +508,7 @@ class TDTestCase: # self.sync_run_case() self.unsync_run_case() - + def stop(self): @@ -494,4 +516,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py index fa7e5292de668ab578f502c643ea9eb5be9a2d8e..edff2747933feeeacdd00eee601895c626251780 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -30,9 +30,9 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None self.loop_restart_times = 5 @@ -110,7 +110,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(5): tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) tdSql.query("show stables") @@ -142,7 +142,7 @@ class TDTestCase: tdSql.execute(drop_db_sql) tdSql.execute(create_db_sql) tdSql.execute("use {}".format(dbname)) - + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): tdSql.execute("use {}".format(dbname)) tdSql.execute( @@ -151,7 +151,7 @@ class TDTestCase: tags (t1 int) '''.format(stablename) ) - + for i in range(tb_nums): sub_tbname = "sub_{}_{}".format(stablename,i) tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) @@ -162,11 +162,11 @@ class TDTestCase: tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) - + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): - + tdSql.execute("use {}".format(dbname)) - + for row_num in range(append_nums): tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -176,7 +176,7 @@ class TDTestCase: def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): tdSql.execute("use {}".format(dbname)) - + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: @@ -184,8 +184,8 @@ class TDTestCase: tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) status_OK = self.mycheckData("select count(*) from 
{}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) - - count = 0 + + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups; '".format(dbname)) @@ -199,14 +199,14 @@ class TDTestCase: status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) count += 1 - + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: time.sleep(0.1) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) - count = 0 + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups;'".format(dbname)) @@ -221,25 +221,41 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False for vgroup_info in vgroup_infos: - leader_infos = vgroup_info[3:-4] + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self , newTdSql): + def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -250,7 +266,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -259,10 +275,11 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self , newTdSql): + def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -272,7 +289,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="ready": @@ -366,29 +383,32 @@ class TDTestCase: tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) check_status = False return check_status - + def sync_run_case(self): # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes + newTdSql=tdCom.newTdSql() for loop in range(self.loop_restart_times): db_name = "sync_db_{}".format(loop) stablename = 'stable_{}'.format(loop) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = 
stablename , tb_nums= 10 ,row_nums= 10 ) - self.stop_dnode_id = self._get_stop_dnode_id(db_name) + + self.stop_dnode_id = self._get_stop_dnode_id(db_name,"follower") + # check rows of datas - + self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0) - - # begin stop dnode + + # begin stop dnode start = time.time() tdDnodes[self.stop_dnode_id-1].forcestop() - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) + + # append rows of stablename when dnode stop - # append rows of stablename when dnode stop - tbname = "sub_{}_{}".format(stablename , 0) tdLog.notice(" ==== begin append rows of exists table {} when dnode {} offline ====".format(tbname , self.stop_dnode_id)) self.append_rows_of_exists_tables(db_name ,stablename , tbname , 100 ) @@ -401,50 +421,54 @@ class TDTestCase: tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - # begin start dnode + # begin start dnode tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - - # create new stables again + + # create new stables again tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - + def unsync_run_case(self): def _restart_dnode_of_db_unsync(dbname): start = time.time() tdDnodes=cluster.dnodes - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + newTdSql=tdCom.newTdSql() + self.stop_dnode_id = self._get_stop_dnode_id(dbname,"follower") + # print(self.stop_dnode_id) while not self.stop_dnode_id: + # print(self.stop_dnode_id) time.sleep(0.5) - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + self.stop_dnode_id = self._get_stop_dnode_id(dbname,"follower") # begin restart dnode - - # force stop taosd by kill -9 + + # force stop taosd by kill -9 self.force_stop_dnode(self.stop_dnode_id) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) + os.system(" taos -s 'show dnodes;' ") tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) - + if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - + def _create_threading(dbname): self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,)) return self.current_thread - + ''' - in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive + in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive ''' tdDnodes=cluster.dnodes for loop in range(self.loop_restart_times): @@ -455,7 +479,7 @@ class TDTestCase: tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name)) - # create sync threading and start it + # 
create sync threading and start it self.current_thread = _create_threading(db_name) self.current_thread.start() @@ -474,7 +498,7 @@ class TDTestCase: tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - # create new stables again + # create new stables again tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) @@ -488,7 +512,7 @@ class TDTestCase: port = None for dnode_info in tdSql.queryResult: if dnode_id == dnode_info[0]: - port = dnode_info[1].split(":")[-1] + port = dnode_info[1].split(":")[-1] break else: continue @@ -502,7 +526,7 @@ class TDTestCase: os.system(ps_kill_taosd) - def run(self): + def run(self): # basic insert and check of cluster self.check_setup_cluster_status() @@ -510,7 +534,7 @@ class TDTestCase: # self.sync_run_case() self.unsync_run_case() - + def stop(self): @@ -518,4 +542,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py index 2e4b299fc620b4912c292a8ed7c2f4d18a3e9265..76bb2cc99618b365dee6aae2fbda1e79fa54f64a 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -28,9 +28,9 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None self.loop_restart_times = 10 @@ -177,26 +177,40 @@ class TDTestCase: continue - def _get_stop_dnode_id(self,dbname): - newTdSql=tdCom.newTdSql() - newTdSql.query("show {}.vgroups".format(dbname)) - vgroup_infos = newTdSql.queryResult + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: - leader_infos = vgroup_info[3:-4] + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='leader': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self , newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # 
newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -207,7 +221,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -216,10 +230,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -229,7 +243,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="ready": @@ -239,7 +253,7 @@ class TDTestCase: tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id)) def get_leader_infos(self ,dbname): - + newTdSql=tdCom.newTdSql() newTdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = newTdSql.queryResult @@ -259,7 +273,7 @@ class TDTestCase: tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====") else: for vgroup_info in vote_act: - for ind , role in enumerate(vgroup_info): + for ind , role in enumerate(vgroup_info): if role==self.stop_dnode_id: if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: @@ -276,25 +290,28 @@ class TDTestCase: benchmark_build_path = self.getBuildPath() + '/build/bin/taosBenchmark' tdLog.notice("==== start taosBenchmark insert datas of database {} ==== ".format(dbname)) os.system(" {} -f {} >>/dev/null 2>&1 ".format(benchmark_build_path , json_file)) - + def stop_leader_when_Benchmark_inserts(self,dbname , total_rows , json_file ): + + newTdSql=tdCom.newTdSql() + # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes tdSql.execute(" drop database if exists {} ".format(dbname)) tdSql.execute(" create database {} replica {} vgroups {}".format(dbname , self.replica , self.vgroups)) - - # start insert datas using taosBenchmark ,expect insert 10000 rows - + + # start insert datas using taosBenchmark ,expect insert 10000 rows + self.current_thread = threading.Thread(target=self.start_benchmark_inserts, args=(dbname,json_file)) self.current_thread.start() tdSql.query(" show databases ") - - # make sure create database ok + + # make sure create database ok while (tdSql.queryRows!=3): time.sleep(0.5) tdSql.query(" show databases ") - # # make sure create stable ok + # # make sure create stable ok tdSql.query(" show {}.stables ".format(dbname)) while (tdSql.queryRows!=1): time.sleep(0.5) @@ -313,14 +330,14 @@ class TDTestCase: tdLog.debug(" === current insert {} rows in database {} === ".format(tdSql.queryResult[0][0] , dbname)) time.sleep(0.01) tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1")) - + tdLog.debug(" === database {} has write {} rows at least ====".format(dbname,total_rows/10)) - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + self.stop_dnode_id = self._get_stop_dnode_id(dbname ,"leader") - # prepare stop leader of database + # prepare stop leader of database before_leader_infos = self.get_leader_infos(dbname) - + tdDnodes[self.stop_dnode_id-1].stoptaosd() # self.current_thread.join() after_leader_infos = self.get_leader_infos(dbname) @@ -331,20 +348,20 @@ class TDTestCase: after_leader_infos = self.get_leader_infos(dbname) revote_status = 
self.check_revote_leader_success(dbname ,before_leader_infos , after_leader_infos) end = time.time() - time_cost = end - start + time_cost = end - start tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(dbname , time_cost)) self.current_thread.join() - tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) + tdSql.query(" select count(*) from {}.{} ".format(dbname,"stb1")) tdLog.debug(" ==== expected insert {} rows of database {} , really is {}".format(total_rows, dbname , tdSql.queryResult[0][0])) - def run(self): + def run(self): # basic insert and check of cluster # self.check_setup_cluster_status() @@ -359,4 +376,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py index c19a308f1c78a0dbf3036bc752a4ccb160dc43ea..6abe700bd637f7fe2efdcaf5b67a43170991cae1 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -28,15 +28,15 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None - self.loop_restart_times = 10 + self.loop_restart_times = 5 self.current_thread = None self.max_restart_time = 5 - + self.try_check_times = 10 def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) if ("community" in selfPath): @@ -193,7 +193,7 @@ class TDTestCase: (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) - + for i in range(5): tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) tdSql.query("show stables") @@ -225,7 +225,7 @@ class TDTestCase: tdSql.execute(drop_db_sql) tdSql.execute(create_db_sql) tdSql.execute("use {}".format(dbname)) - + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): tdSql.execute("use {}".format(dbname)) tdSql.execute( @@ -234,8 +234,8 @@ class TDTestCase: tags (t1 int) '''.format(stablename) ) - - for i in range(tb_nums): + + for i in range(tb_nums): sub_tbname = "sub_{}_{}".format(stablename,i) tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) # insert datas about new database @@ -245,11 +245,11 @@ class TDTestCase: tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) - + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): - + tdSql.execute("use {}".format(dbname)) - + for row_num in range(append_nums): tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} 
,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -257,9 +257,9 @@ class TDTestCase: os.system("taos -s 'select count(*) from {}.{}';".format(dbname,stablename)) def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): - + tdSql.execute("use {}".format(dbname)) - + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: @@ -267,8 +267,8 @@ class TDTestCase: tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) - - count = 0 + + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups; '".format(dbname)) @@ -282,14 +282,14 @@ class TDTestCase: status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {} ====".format(count , dbname)) count += 1 - + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: time.sleep(0.1) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) - count = 0 + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups;'".format(dbname)) @@ -304,26 +304,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): - newTdSql=tdCom.newTdSql() - newTdSql.query("show {}.vgroups".format(dbname)) - vgroup_infos = newTdSql.queryResult + def _get_stop_dnode_id(self,dbname ,dnode_role): + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + status = False for vgroup_info in vgroup_infos: - leader_infos = vgroup_info[3:-4] + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='leader': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): - + def wait_stop_dnode_OK(self ,newTdSql): + def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -334,7 +348,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -343,10 +357,11 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self,newTdSql): + def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() 
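Review note: the refactor above recurs in every file this patch touches -- wait_stop_dnode_OK and wait_start_dnode_OK now receive one connection created once by the caller (tdCom.newTdSql()) instead of opening a fresh connection inside each _get_status() probe, as the commented-out line shows. A minimal sketch of that polling pattern, assuming `show dnodes` rows carry the status in column 4; the helper name and column index are illustrative, not the repo's API:

import time

def wait_dnode_status(conn, dnode_id, expect, interval=0.1):
    # Poll `show dnodes` over one shared connection until the target
    # dnode reports the expected state ("offline" or "ready").
    while True:
        conn.query("show dnodes")
        status = next((row[4] for row in conn.queryResult
                       if row[0] == dnode_id), "")
        if status == expect:
            return
        time.sleep(interval)

Reusing a single connection keeps the test from leaking a taos session on every 0.1 s retry, which matters once the loop runs for a whole restart window.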
status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -356,7 +371,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="ready": @@ -366,7 +381,7 @@ class TDTestCase: tdLog.notice("==== stop_dnode has restart , id is {}".format(self.stop_dnode_id)) def get_leader_infos(self ,dbname): - + newTdSql=tdCom.newTdSql() newTdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = newTdSql.queryResult @@ -386,7 +401,7 @@ class TDTestCase: tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====") else: for vgroup_info in vote_act: - for ind , role in enumerate(vgroup_info): + for ind , role in enumerate(vgroup_info): if role==self.stop_dnode_id: if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: @@ -400,12 +415,12 @@ class TDTestCase: return check_status def force_stop_dnode(self, dnode_id ): - + tdSql.query("show dnodes") port = None for dnode_info in tdSql.queryResult: if dnode_id == dnode_info[0]: - port = dnode_info[1].split(":")[-1] + port = dnode_info[1].split(":")[-1] break else: continue @@ -421,36 +436,39 @@ class TDTestCase: def sync_run_case(self): # stop follower and insert datas , update tables and create new stables tdDnodes=cluster.dnodes + newTdSql=tdCom.newTdSql() for loop in range(self.loop_restart_times): db_name = "sync_db_{}".format(loop) stablename = 'stable_{}'.format(loop) self.create_database(dbname = db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = db_name , stablename = stablename , tb_nums= 10 ,row_nums= 10 ) - self.stop_dnode_id = self._get_stop_dnode_id(db_name) + + self.stop_dnode_id = self._get_stop_dnode_id(db_name ,"leader") # check rows of datas - + self.check_insert_rows(db_name ,stablename ,tb_nums=10 , row_nums= 10 ,append_rows=0) - # get leader info before stop + # get leader info before stop before_leader_infos = self.get_leader_infos(db_name) - # begin stop dnode - # force stop taosd by kill -9 + # begin stop dnode + # force stop taosd by kill -9 self.force_stop_dnode(self.stop_dnode_id) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) # vote leaders check - # get leader info after stop + # get leader info after stop after_leader_infos = self.get_leader_infos(db_name) - + revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) # append rows of stablename when dnode stop make sure revote leaders while not revote_status: + after_leader_infos = self.get_leader_infos(db_name) revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) @@ -470,37 +488,38 @@ class TDTestCase: else: tdLog.notice("===== leader of database {} is not ok , append rows fail =====".format(db_name)) - # begin start dnode + # begin start dnode start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end -start) if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - - # create new stables again + + # create new stables again tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) tdLog.notice(" ==== check new stable {} when dnode {} restart 
====".format('new_stb2' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - + def unsync_run_case(self): def _restart_dnode_of_db_unsync(dbname): - + tdDnodes=cluster.dnodes - self.stop_dnode_id = self._get_stop_dnode_id(dbname) + newTdSql=tdCom.newTdSql() + self.stop_dnode_id = self._get_stop_dnode_id(dbname ,"leader" ) # begin restart dnode - # force stop taosd by kill -9 - # get leader info before stop + # force stop taosd by kill -9 + # get leader info before stop before_leader_infos = self.get_leader_infos(db_name) self.force_stop_dnode(self.stop_dnode_id) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) # check revote leader when restart servers - # get leader info after stop + # get leader info after stop after_leader_infos = self.get_leader_infos(db_name) revote_status = self.check_revote_leader_success(db_name ,before_leader_infos , after_leader_infos) # append rows of stablename when dnode stop make sure revote leaders @@ -520,30 +539,30 @@ class TDTestCase: tdLog.notice(" ==== check new stable {} when dnode {} offline ====".format('new_stb1' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb1' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - # create new stables again + # create new stables again tdLog.notice(" ==== create new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.create_stable_insert_datas(dbname = db_name , stablename = 'new_stb2' , tb_nums= 10 ,row_nums= 10 ) tdLog.notice(" ==== check new stable {} when dnode {} restart ====".format('new_stb2' , self.stop_dnode_id)) self.check_insert_rows(db_name ,'new_stb2' ,tb_nums=10 , row_nums= 10 ,append_rows=0) - + tdDnodes[self.stop_dnode_id-1].starttaosd() start = time.time() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) - + if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - + def _create_threading(dbname): self.current_thread = threading.Thread(target=_restart_dnode_of_db_unsync, args=(dbname,)) return self.current_thread - + ''' - in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive + in this mode , it will be extra threading control start or stop dnode , insert will always going with not care follower online or alive ''' for loop in range(self.loop_restart_times): db_name = "unsync_db_{}".format(loop) @@ -553,7 +572,7 @@ class TDTestCase: tdLog.notice(" ===== restart dnode of database {} in an unsync threading ===== ".format(db_name)) - # create sync threading and start it + # create sync threading and start it self.current_thread = _create_threading(db_name) self.current_thread.start() @@ -564,7 +583,7 @@ class TDTestCase: self.current_thread.join() - def run(self): + def run(self): # basic insert and check of cluster self.check_setup_cluster_status() @@ -577,4 +596,4 @@ class TDTestCase: tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py new file mode 100644 index 
0000000000000000000000000000000000000000..ed20a51595924bd792e3eb5a4a4fa692408310b1 --- /dev/null +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py @@ -0,0 +1,218 @@ +# author : wenzhouwww +from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE +import taos +import sys +import time +import os + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import TDDnodes +from util.dnodes import TDDnode +from util.cluster import * + +import time +import socket +import subprocess ,threading +sys.path.append(os.path.dirname(__file__)) + +class TDTestCase: + def init(self,conn ,logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor()) + self.host = socket.gethostname() + self.mnode_list = {} + self.dnode_list = {} + self.ts = 1483200000000 + self.db_name ='testdb' + self.replica = 3 + self.vgroups = 1 + self.tb_nums = 10 + self.row_nums = 1000 + self.query_times = 100 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def check_setup_cluster_status(self): + tdSql.query("show mnodes") + for mnode in tdSql.queryResult: + name = mnode[1] + info = mnode + self.mnode_list[name] = info + + tdSql.query("show dnodes") + for dnode in tdSql.queryResult: + name = dnode[1] + info = dnode + self.dnode_list[name] = info + + count = 0 + is_leader = False + mnode_name = '' + for k,v in self.mnode_list.items(): + count +=1 + # only for 1 mnode + mnode_name = k + + if v[2] =='leader': + is_leader=True + + if count==1 and is_leader: + tdLog.notice("===== deploy cluster success with 1 mnode as leader =====") + else: + tdLog.exit("===== deploy cluster fail with 1 mnode as leader =====") + + for k ,v in self.dnode_list.items(): + if k == mnode_name: + if v[3]==0: + tdLog.notice("===== deploy cluster mnode only success at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + tdLog.exit("===== deploy cluster mnode only fail at {} , support_vnodes is {} ".format(mnode_name,v[3])) + else: + continue + + def create_db_check_vgroups(self): + + tdSql.execute("drop database if exists test") + tdSql.execute("create database if not exists test replica 1 duration 300") + tdSql.execute("use test") + tdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + tdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(5): + tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i)) + tdSql.query("show stables") + tdSql.checkRows(1) + tdSql.query("show tables") + tdSql.checkRows(6) + + tdSql.query("show test.vgroups;") + vgroups_infos = {} # key is id: value is info list + for vgroup_info in tdSql.queryResult: + vgroup_id = vgroup_info[0] + tmp_list = [] + for role in vgroup_info[3:-4]: + if role in ['leader','follower']: + tmp_list.append(role) + vgroups_infos[vgroup_id]=tmp_list + + for k , v in
vgroups_infos.items(): + if len(v) ==1 and v[0]=="leader": + tdLog.notice(" === create database replica only 1 role leader check success of vgroup_id {} ======".format(k)) + else: + tdLog.exit(" === create database replica only 1 role leader check fail of vgroup_id {} ======".format(k)) + + def create_db_replica_3_insertdatas(self, dbname, replica_num ,vgroup_nums ,tb_nums , row_nums ): + newTdSql=tdCom.newTdSql() + drop_db_sql = "drop database if exists {}".format(dbname) + create_db_sql = "create database {} replica {} vgroups {}".format(dbname,replica_num,vgroup_nums) + + tdLog.notice(" ==== create database {} and insert rows begin =====".format(dbname)) + newTdSql.execute(drop_db_sql) + newTdSql.execute(create_db_sql) + newTdSql.execute("use {}".format(dbname)) + newTdSql.execute( + '''create table stb1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + tags (t1 int) + ''' + ) + newTdSql.execute( + ''' + create table t1 + (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) + ''' + ) + + for i in range(tb_nums): + sub_tbname = "sub_tb_{}".format(i) + newTdSql.execute("create table {} using stb1 tags({})".format(sub_tbname,i)) + # insert datas about new database + + for row_num in range(row_nums): + ts = self.ts + 1000*row_num + newTdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") + + tdLog.notice(" ==== create database {} and insert rows execute end =====".format(dbname)) + + + + def check_insert_status(self,newTdSql , dbname, tb_nums , row_nums): + # newTdSql=tdCom.newTdSql() + newTdSql.execute("use {}".format(dbname)) + newTdSql.query("select count(*) from {}.{}".format(dbname,'stb1')) + # tdSql.checkData(0 , 0 , tb_nums*row_nums) + newTdSql.query("select distinct tbname from {}.{}".format(dbname,'stb1')) + # tdSql.checkRows(tb_nums) + + def loop_query_constantly(self, times , db_name, tb_nums ,row_nums): + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === query is going ,this is {}_th query === ".format(loop_time)) + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) + + def loop_create_databases(self, times , tb_nums , row_nums): + newTdSql=tdCom.newTdSql() + for loop_time in range(times): + tdLog.debug(" === create database and insert datas is going ,this is {}_th create === ".format(loop_time)) + db_name = 'loop_db_{}'.format(loop_time) + self.create_db_replica_3_insertdatas(db_name , self.replica , self.vgroups , tb_nums , row_nums) + self.check_insert_status( newTdSql ,db_name, tb_nums , row_nums) + + def run(self): + self.check_setup_cluster_status() + self.create_db_check_vgroups() + + # create mnode + tdSql.execute("create mnode on dnode 2 ") + tdSql.execute("create mnode on dnode 3 ") + os.system("taos -s 'show mnodes;'") + # start writing constantly + writing = threading.Thread(target = self.create_db_replica_3_insertdatas, args=(self.db_name , self.replica , self.vgroups , self.tb_nums , self.row_nums)) + writing.start() + tdSql.query(" show {}.stables ".format(self.db_name)) + while not tdSql.queryResult: + print(tdSql.queryResult) + time.sleep(0.1) + tdSql.query(" show {}.stables ".format(self.db_name)) + + reading = threading.Thread(target = self.loop_query_constantly, args=(self.query_times,self.db_name , self.tb_nums , self.row_nums)) + reading.start() + + 
create_db = threading.Thread(target = self.loop_create_databases, args=(5, 10 , 10)) + create_db.start() + + writing.join() + reading.join() + create_db.join() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py index 2bfe5447494d8f5fa72004d5442e4eae4fd6309d..d60817a2b4f17f265b3006edc11b5fccc73ca9d5 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -30,9 +30,9 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None self.loop_restart_times = 5 @@ -40,7 +40,7 @@ class TDTestCase: self.max_restart_time = 10 self.try_check_times = 10 self.query_times = 100 - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -103,7 +103,7 @@ class TDTestCase: tdSql.execute(drop_db_sql) tdSql.execute(create_db_sql) tdSql.execute("use {}".format(dbname)) - + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): tdSql.execute("use {}".format(dbname)) tdSql.execute( @@ -112,7 +112,7 @@ class TDTestCase: tags (t1 int) '''.format(stablename) ) - + for i in range(tb_nums): sub_tbname = "sub_{}_{}".format(stablename,i) tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) @@ -123,11 +123,11 @@ class TDTestCase: tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) - + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): - + tdSql.execute("use {}".format(dbname)) - + for row_num in range(append_nums): tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -137,7 +137,7 @@ class TDTestCase: def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): tdSql.execute("use {}".format(dbname)) - + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: @@ -145,8 +145,8 @@ class TDTestCase: tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) - - count = 0 + + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups; '".format(dbname)) @@ -160,14 +160,14 @@ class TDTestCase: status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) tdLog.notice(" ==== check insert 
rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) count += 1 - + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: time.sleep(0.1) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) - count = 0 + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups;'".format(dbname)) @@ -182,25 +182,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False for vgroup_info in vgroup_infos: - leader_infos = vgroup_info[3:-4] + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -211,7 +226,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -220,10 +235,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -233,7 +248,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="ready": @@ -327,7 +342,7 @@ class TDTestCase: tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) check_status = False return check_status - + def force_stop_dnode(self, dnode_id ): @@ -335,7 +350,7 @@ class TDTestCase: port = None for dnode_info in tdSql.queryResult: if dnode_id == dnode_info[0]: - port = dnode_info[1].split(":")[-1] + port = dnode_info[1].split(":")[-1] break else: continue @@ -349,9 +364,9 @@ class TDTestCase: os.system(ps_kill_taosd) def basic_query_task(self,dbname ,stablename): - + sql = "select * from {}.{} ;".format(dbname , stablename) - + count = 0 while count < self.query_times: os.system(''' taos -s '{}' >>/dev/null '''.format(sql)) @@ -364,53 +379,53 @@ class TDTestCase: self.thread_list.append(task) for thread in self.thread_list: - + thread.start() return self.thread_list def stop_follower_when_query_going(self): - + tdDnodes = cluster.dnodes + newTdSql=tdCom.newTdSql() self.create_database(dbname = self.db_name ,replica_num= 
self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums) # let query task start - self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' ) + self.thread_list = self.multi_thread_query_task(5 ,self.db_name ,'stb1' ) + # force stop follower for loop in range(self.loop_restart_times): tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name)) - self.stop_dnode_id = self._get_stop_dnode_id(self.db_name) + self.stop_dnode_id = self._get_stop_dnode_id(self.db_name,"follower" ) tdDnodes[self.stop_dnode_id-1].stoptaosd() - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) - + if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - + for thread in self.thread_list: thread.join() - def run(self): + def run(self): # basic check of cluster self.check_setup_cluster_status() self.stop_follower_when_query_going() - - def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py index 2a4e43d904f1ca6b3627e12d84094270b99f9259..da9dc115eb0d19e8d9154508263b5ac7988ef578 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -30,9 +30,9 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None self.loop_restart_times = 5 @@ -40,7 +40,7 @@ class TDTestCase: self.max_restart_time = 10 self.try_check_times = 10 self.query_times = 100 - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -103,7 +103,7 @@ class TDTestCase: tdSql.execute(drop_db_sql) tdSql.execute(create_db_sql) tdSql.execute("use {}".format(dbname)) - + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): tdSql.execute("use {}".format(dbname)) tdSql.execute( @@ -112,7 +112,7 @@ class TDTestCase: tags (t1 int) '''.format(stablename) ) - + for i in range(tb_nums): sub_tbname = "sub_{}_{}".format(stablename,i) tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) @@ -123,11 +123,11 @@ class TDTestCase: tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) - + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): - + tdSql.execute("use 
{}".format(dbname)) - + for row_num in range(append_nums): tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -137,7 +137,7 @@ class TDTestCase: def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): tdSql.execute("use {}".format(dbname)) - + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: @@ -145,8 +145,8 @@ class TDTestCase: tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) - - count = 0 + + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups; '".format(dbname)) @@ -160,14 +160,14 @@ class TDTestCase: status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) count += 1 - + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: time.sleep(0.1) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) - count = 0 + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups;'".format(dbname)) @@ -182,25 +182,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False for vgroup_info in vgroup_infos: - leader_infos = vgroup_info[3:-4] + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='follower': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): - + def wait_stop_dnode_OK(self ,newTdSql): + def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -211,7 +226,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -220,10 +235,10 @@ class TDTestCase: # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode)) tdLog.notice("==== stop_dnode has stopped , id is {}".format(self.stop_dnode_id)) - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = 
newTdSql.queryResult @@ -233,7 +248,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="ready": @@ -327,7 +342,7 @@ class TDTestCase: tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args) check_status = False return check_status - + def force_stop_dnode(self, dnode_id ): @@ -335,7 +350,7 @@ class TDTestCase: port = None for dnode_info in tdSql.queryResult: if dnode_id == dnode_info[0]: - port = dnode_info[1].split(":")[-1] + port = dnode_info[1].split(":")[-1] break else: continue @@ -349,9 +364,9 @@ class TDTestCase: os.system(ps_kill_taosd) def basic_query_task(self,dbname ,stablename): - + sql = "select * from {}.{} ;".format(dbname , stablename) - + count = 0 while count < self.query_times: os.system(''' taos -s '{}' >>/dev/null '''.format(sql)) @@ -364,53 +379,51 @@ class TDTestCase: self.thread_list.append(task) for thread in self.thread_list: - + thread.start() return self.thread_list def stop_follower_when_query_going(self): - + tdDnodes = cluster.dnodes self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums) - # let query task start + # let query task start self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' ) # force stop follower + newTdSql=tdCom.newTdSql() for loop in range(self.loop_restart_times): tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name)) - self.stop_dnode_id = self._get_stop_dnode_id(self.db_name) + self.stop_dnode_id = self._get_stop_dnode_id(self.db_name,"follower" ) self.force_stop_dnode(self.stop_dnode_id) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) - + if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - + for thread in self.thread_list: thread.join() - def run(self): + def run(self): # basic check of cluster self.check_setup_cluster_status() self.stop_follower_when_query_going() - - - def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py index 41606946f6e8b537b5273894c7c791891131e828..561159af892084704da71336acac609fac0eccc2 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -30,9 +30,9 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None self.loop_restart_times = 5 @@ -40,7 +40,7 @@ 
class TDTestCase: self.max_restart_time = 10 self.try_check_times = 10 self.query_times = 100 - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -103,7 +103,7 @@ class TDTestCase: tdSql.execute(drop_db_sql) tdSql.execute(create_db_sql) tdSql.execute("use {}".format(dbname)) - + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): tdSql.execute("use {}".format(dbname)) tdSql.execute( @@ -112,7 +112,7 @@ class TDTestCase: tags (t1 int) '''.format(stablename) ) - + for i in range(tb_nums): sub_tbname = "sub_{}_{}".format(stablename,i) tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) @@ -123,11 +123,11 @@ class TDTestCase: tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) - + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): - + tdSql.execute("use {}".format(dbname)) - + for row_num in range(append_nums): tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -137,7 +137,7 @@ class TDTestCase: def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): tdSql.execute("use {}".format(dbname)) - + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: @@ -145,8 +145,8 @@ class TDTestCase: tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) - - count = 0 + + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups; '".format(dbname)) @@ -160,14 +160,14 @@ class TDTestCase: status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) count += 1 - + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: time.sleep(0.1) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) - count = 0 + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups;'".format(dbname)) @@ -182,25 +182,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) for vgroup_info in vgroup_infos: - leader_infos = 
vgroup_info[3:-4] + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='leader': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): + def wait_stop_dnode_OK(self,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -211,7 +226,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -229,7 +244,7 @@ class TDTestCase: tdLog.info(" ===maybe revote not occured , there is no dnode offline ====") else: for vgroup_info in vote_act: - for ind , role in enumerate(vgroup_info): + for ind , role in enumerate(vgroup_info): if role==self.stop_dnode_id: if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: @@ -242,10 +257,11 @@ class TDTestCase: break return check_status - def wait_start_dnode_OK(self): + + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") dnode_infos = newTdSql.queryResult @@ -255,7 +271,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="ready": @@ -351,9 +367,9 @@ class TDTestCase: return check_status - def get_leader_infos(self ,dbname): + def get_leader_infos(self , newTdSql ,dbname): + - newTdSql=tdCom.newTdSql() newTdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = newTdSql.queryResult @@ -369,7 +385,7 @@ class TDTestCase: port = None for dnode_info in tdSql.queryResult: if dnode_id == dnode_info[0]: - port = dnode_info[1].split(":")[-1] + port = dnode_info[1].split(":")[-1] break else: continue @@ -383,9 +399,9 @@ class TDTestCase: os.system(ps_kill_taosd) def basic_query_task(self,dbname ,stablename): - + sql = "select * from {}.{} ;".format(dbname , stablename) - + count = 0 while count < self.query_times: os.system(''' taos -s '{}' >>/dev/null '''.format(sql)) @@ -398,73 +414,70 @@ class TDTestCase: self.thread_list.append(task) for thread in self.thread_list: - + thread.start() return self.thread_list def stop_follower_when_query_going(self): - + tdDnodes = cluster.dnodes self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1) self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums) # let query task start - self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' ) + self.thread_list = self.multi_thread_query_task(2 ,self.db_name ,'stb1' ) + newTdSql=tdCom.newTdSql() # force stop follower for loop in range(self.loop_restart_times): tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name)) # get leader info before stop - before_leader_infos = self.get_leader_infos(self.db_name) + before_leader_infos = self.get_leader_infos(newTdSql , self.db_name) - self.stop_dnode_id = self._get_stop_dnode_id(self.db_name) + self.stop_dnode_id = self._get_stop_dnode_id(self.db_name,"leader") tdDnodes[self.stop_dnode_id-1].stoptaosd() - start = time.time() # get leader info after stop - after_leader_infos = self.get_leader_infos(self.db_name) + after_leader_infos = self.get_leader_infos(newTdSql , self.db_name) revote_status = 
self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos) while not revote_status: - after_leader_infos = self.get_leader_infos(self.db_name) + after_leader_infos = self.get_leader_infos(newTdSql , self.db_name) revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos) end = time.time() - time_cost = end - start + time_cost = end - start tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost)) - self.wait_stop_dnode_OK() + self.wait_stop_dnode_OK(newTdSql) start = time.time() tdDnodes[self.stop_dnode_id-1].starttaosd() - self.wait_start_dnode_OK() + self.wait_start_dnode_OK(newTdSql) end = time.time() time_cost = int(end-start) - + if time_cost > self.max_restart_time: tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id)) - + for thread in self.thread_list: thread.join() - def run(self): + def run(self): # basic check of cluster self.check_setup_cluster_status() self.stop_follower_when_query_going() - - - def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") tdCases.addLinux(__file__, TDTestCase()) -tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py index 5ddcf1c70e0b5003d792931099ddab9b8d202dff..fb0ddd54350071c1930a6a79ebd8a6f6efda7bde 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py @@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE import taos import sys import time -import os +import os from util.log import * from util.sql import * @@ -30,9 +30,9 @@ class TDTestCase: self.ts = 1483200000000 self.ts_step =1000 self.db_name ='testdb' - self.replica = 3 + self.replica = 3 self.vgroups = 1 - self.tb_nums = 10 + self.tb_nums = 10 self.row_nums = 100 self.stop_dnode_id = None self.loop_restart_times = 5 @@ -40,7 +40,7 @@ class TDTestCase: self.max_restart_time = 10 self.try_check_times = 10 self.query_times = 100 - + def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -103,7 +103,7 @@ class TDTestCase: tdSql.execute(drop_db_sql) tdSql.execute(create_db_sql) tdSql.execute("use {}".format(dbname)) - + def create_stable_insert_datas(self,dbname ,stablename , tb_nums , row_nums): tdSql.execute("use {}".format(dbname)) tdSql.execute( @@ -112,7 +112,7 @@ class TDTestCase: tags (t1 int) '''.format(stablename) ) - + for i in range(tb_nums): sub_tbname = "sub_{}_{}".format(stablename,i) tdSql.execute("create table {} using {} tags({})".format(sub_tbname, stablename ,i)) @@ -123,11 +123,11 @@ class TDTestCase: tdSql.execute(f"insert into {sub_tbname} values ({ts}, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") tdLog.notice(" ==== stable {} insert rows execute end =====".format(stablename)) - + def append_rows_of_exists_tables(self,dbname ,stablename , tbname , append_nums ): - + tdSql.execute("use {}".format(dbname)) - + for row_num in range(append_nums): tdSql.execute(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} 
,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") # print(f"insert into {tbname} values (now, {row_num} ,{row_num}, 10 ,1 ,{row_num} ,{row_num},true,'bin_{row_num}','nchar_{row_num}',now) ") @@ -137,7 +137,7 @@ class TDTestCase: def check_insert_rows(self, dbname, stablename , tb_nums , row_nums, append_rows): tdSql.execute("use {}".format(dbname)) - + tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: @@ -145,8 +145,8 @@ class TDTestCase: tdSql.query("select count(*) from {}.{}".format(dbname,stablename)) status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) - - count = 0 + + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups; '".format(dbname)) @@ -160,14 +160,14 @@ class TDTestCase: status_OK = self.mycheckData("select count(*) from {}.{}".format(dbname,stablename) ,0 , 0 , tb_nums*row_nums+append_rows) tdLog.notice(" ==== check insert rows first failed , this is {}_th retry check rows of database {}".format(count , dbname)) count += 1 - + tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) while not tdSql.queryResult: time.sleep(0.1) tdSql.query("select distinct tbname from {}.{}".format(dbname,stablename)) status_OK = self.mycheckRows("select distinct tbname from {}.{}".format(dbname,stablename) ,tb_nums) - count = 0 + count = 0 while not status_OK : if count > self.try_check_times: os.system("taos -s ' show {}.vgroups;'".format(dbname)) @@ -182,25 +182,40 @@ class TDTestCase: tdLog.notice(" ==== check insert tbnames first failed , this is {}_th retry check tbnames of database {}".format(count , dbname)) count += 1 - def _get_stop_dnode_id(self,dbname): + def _get_stop_dnode_id(self,dbname ,dnode_role): tdSql.query("show {}.vgroups".format(dbname)) vgroup_infos = tdSql.queryResult + status = False for vgroup_info in vgroup_infos: - leader_infos = vgroup_info[3:-4] + if "error" not in vgroup_info: + status = True + else: + status = False + while status!=True : + time.sleep(0.1) + tdSql.query("show {}.vgroups".format(dbname)) + vgroup_infos = tdSql.queryResult + for vgroup_info in vgroup_infos: + if "error" not in vgroup_info: + status = True + else: + status = False + # print(status) + for vgroup_info in vgroup_infos: + leader_infos = vgroup_info[3:-4] # print(vgroup_info) for ind ,role in enumerate(leader_infos): - if role =='leader': + if role == dnode_role: # print(ind,leader_infos) self.stop_dnode_id = leader_infos[ind-1] break - return self.stop_dnode_id - def wait_stop_dnode_OK(self): - + def wait_stop_dnode_OK(self ,newTdSql): + def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" newTdSql.query("show dnodes") @@ -211,7 +226,7 @@ class TDTestCase: if id == self.stop_dnode_id: status = dnode_status break - return status + return status status = _get_status() while status !="offline": @@ -229,7 +244,7 @@ class TDTestCase: tdLog.info(" ===maybe revote not occured , there is no dnode offline ====") else: for vgroup_info in vote_act: - for ind , role in enumerate(vgroup_info): + for ind , role in enumerate(vgroup_info): if role==self.stop_dnode_id: if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info: @@ -242,10 +257,10 @@ class TDTestCase: break return check_status - def wait_start_dnode_OK(self): + def wait_start_dnode_OK(self ,newTdSql): def _get_status(): - newTdSql=tdCom.newTdSql() + # newTdSql=tdCom.newTdSql() status = "" 
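Review note: both stop-leader variants follow the same shape -- snapshot the leader layout from `show <db>.vgroups` before the kill, then poll check_revote_leader_success until the layout changes. A simplified reading of that check, assuming the (dnode_id, role) pairs sit in the slice vgroup_info[3:-4] exactly as these tests index them; the helper names are illustrative:

def leader_map(conn, dbname):
    # Map vgroup id -> dnode id currently holding the leader role,
    # slicing (dnode_id, role) pairs out of vgroup_info[3:-4].
    conn.query("show {}.vgroups".format(dbname))
    snapshot = {}
    for row in conn.queryResult:
        roles = row[3:-4]
        for ind, role in enumerate(roles):
            if role == "leader":
                snapshot[row[0]] = roles[ind - 1]  # dnode id precedes its role
    return snapshot

def revote_happened(before, after):
    # A revote counts once some vgroup's leader moved to another dnode.
    return any(after.get(vg) != dnode for vg, dnode in before.items())

The tests are stricter than this sketch: check_revote_leader_success also requires the stopped dnode to appear as offline in the after snapshot before the revote is treated as complete.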
             newTdSql.query("show dnodes")
             dnode_infos = newTdSql.queryResult
@@ -255,7 +270,7 @@ class TDTestCase:
                 if id == self.stop_dnode_id:
                     status = dnode_status
                     break
-            return status 
+            return status
 
         status = _get_status()
         while status !="ready":
@@ -349,10 +364,10 @@ class TDTestCase:
                 tdLog.info("%s(%d) failed: sql:%s, queryRows:%d != expect:%d" % args)
                 check_status = False
         return check_status
-
-
+
+
     def get_leader_infos(self ,dbname):
-        
+
         newTdSql=tdCom.newTdSql()
         newTdSql.query("show {}.vgroups".format(dbname))
         vgroup_infos = newTdSql.queryResult
@@ -369,7 +384,7 @@ class TDTestCase:
         port = None
         for dnode_info in tdSql.queryResult:
             if dnode_id == dnode_info[0]:
-                port = dnode_info[1].split(":")[-1] 
+                port = dnode_info[1].split(":")[-1]
                 break
             else:
                 continue
@@ -383,9 +398,9 @@ class TDTestCase:
             os.system(ps_kill_taosd)
 
     def basic_query_task(self,dbname ,stablename):
-        
+
         sql = "select * from {}.{} ;".format(dbname , stablename)
-        
+
         count = 0
         while count < self.query_times:
             os.system(''' taos -s '{}' >>/dev/null '''.format(sql))
@@ -398,35 +413,37 @@ class TDTestCase:
             self.thread_list.append(task)
 
         for thread in self.thread_list:
-            
+
             thread.start()
 
         return self.thread_list
 
     def stop_follower_when_query_going(self):
-
+
         tdDnodes = cluster.dnodes
+        newTdSql=tdCom.newTdSql()
         self.create_database(dbname = self.db_name ,replica_num= self.replica , vgroup_nums= 1)
         self.create_stable_insert_datas(dbname = self.db_name , stablename = "stb1" , tb_nums= self.tb_nums ,row_nums= self.row_nums)
 
         # let query task start
-        self.thread_list = self.multi_thread_query_task(10 ,self.db_name ,'stb1' )
+        self.thread_list = self.multi_thread_query_task(5 ,self.db_name ,'stb1' )
+
         # force stop follower
         for loop in range(self.loop_restart_times):
             tdLog.debug(" ==== this is {}_th restart follower of database {} ==== ".format(loop ,self.db_name))
-            # get leader info before stop 
+            # get leader info before stop
             before_leader_infos = self.get_leader_infos(self.db_name)
 
-            self.stop_dnode_id = self._get_stop_dnode_id(self.db_name)
+            self.stop_dnode_id = self._get_stop_dnode_id(self.db_name ,"leader")
             self.force_stop_dnode(self.stop_dnode_id)
-
+
             start = time.time()
-            # get leader info after stop 
+            # get leader info after stop
             after_leader_infos = self.get_leader_infos(self.db_name)
-            
+
             revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
 
             while not revote_status:
@@ -434,32 +451,32 @@ class TDTestCase:
                 revote_status = self.check_revote_leader_success(self.db_name ,before_leader_infos , after_leader_infos)
 
             end = time.time()
-            time_cost = end - start 
+            time_cost = end - start
             tdLog.debug(" ==== revote leader of database {} cost time {} ====".format(self.db_name , time_cost))
 
-            self.wait_stop_dnode_OK()
+            self.wait_stop_dnode_OK(newTdSql)
 
             start = time.time()
             tdDnodes[self.stop_dnode_id-1].starttaosd()
-            self.wait_start_dnode_OK()
+            self.wait_start_dnode_OK(newTdSql)
             end = time.time()
             time_cost = int(end-start)
-            
+
             if time_cost > self.max_restart_time:
                 tdLog.exit(" ==== restart dnode {} cost too much time , please check ====".format(self.stop_dnode_id))
-            
+
         for thread in self.thread_list:
             thread.join()
 
-    def run(self): 
+    def run(self):
         # basic check of cluster
         self.check_setup_cluster_status()
         self.stop_follower_when_query_going()
 
-    
+
     def stop(self):
@@ -467,4 +484,4 @@ class TDTestCase:
     tdLog.success(f"{__file__} successfully executed")
 
 tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
index 78f6ee153b17fb50a4e0e1074b12f9cea5d14b3d..ff7f84a29d7f7c7de2f17b0367479dcc51e15794 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
@@ -3,7 +3,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
 import taos
 import sys
 import time
-import os 
+import os
 
 from util.log import *
 from util.sql import *
@@ -25,9 +25,9 @@ class TDTestCase:
         self.dnode_list = {}
         self.ts = 1483200000000
         self.db_name ='testdb'
-        self.replica = 1 
+        self.replica = 1
         self.vgroups = 2
-        self.tb_nums = 10 
+        self.tb_nums = 10
         self.row_nums = 100
         self.max_vote_time_cost = 10  # seconds
 
@@ -101,7 +101,7 @@ class TDTestCase:
             (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
             '''
             )
-        
+
         for i in range(5):
             tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
         tdSql.query("show stables")
@@ -134,7 +134,7 @@ class TDTestCase:
             vgroup_id = vgroup_info[0]
             vgroup_status = []
             for ind , role in enumerate(vgroup_info[3:-4]):
-                
+
                 if ind%2==0:
                     continue
                 else:
@@ -151,7 +151,7 @@ class TDTestCase:
         while not status:
             time.sleep(0.1)
             status = self.check_vgroups_init_done(dbname)
-        
+
         # tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
         end = time.time()
         cost_time = end - start
@@ -159,16 +159,16 @@ class TDTestCase:
         # os.system("taos -s 'show {}.vgroups;'".format(dbname))
         if cost_time >= self.max_vote_time_cost:
             tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
-            
-        
+
+
         return cost_time
-        
+
     def test_init_vgroups_time_costs(self):
 
         tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
         tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
 
-        # create database replica 3 vgroups 1 
+        # create database replica 3 vgroups 1
         db1 = 'db_1'
         create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
@@ -189,10 +189,10 @@ class TDTestCase:
         tdLog.notice('=======database {} replica 3 vgroups 100 ======'.format(db3))
         tdSql.execute(create_db_replica_3_vgroups_100)
         self.vote_leader_time_costs(db3)
-        
-    
-    def run(self): 
+
+
+    def run(self):
         self.check_setup_cluster_status()
         self.test_init_vgroups_time_costs()
 
@@ -203,4 +203,4 @@ class TDTestCase:
     tdLog.success(f"{__file__} successfully executed")
 
 tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
index 32ee0a87114cada83107134fd86d8f9584a5ef72..97a497dfe93f1f28bc8d2a9e2774bc065b6c3cc1 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
@@ -4,7 +4,7 @@ from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
 import taos
 import sys
 import time
-import os 
+import os
 
 from util.log import *
 from util.sql import *
@@ -13,7 +13,7 @@ from util.dnodes import TDDnodes
 from util.dnodes import TDDnode
 from util.cluster import *
-import time 
+import time
 import random
 import socket
 import subprocess
@@ -27,9 +27,9 @@ class TDTestCase:
         self.dnode_list = {}
         self.ts = 1483200000000
         self.db_name ='testdb'
-        self.replica = 1 
+        self.replica = 1
         self.vgroups = 2
-        self.tb_nums = 10 
+        self.tb_nums = 10
         self.row_nums = 100
         self.max_vote_time_cost = 10  # seconds
         self.stop_dnode = None
@@ -104,7 +104,7 @@ class TDTestCase:
             (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
             '''
             )
-        
+
         for i in range(5):
             tdSql.execute("create table sub_tb_{} using stb1 tags({})".format(i,i))
         tdSql.query("show stables")
@@ -133,7 +133,7 @@ class TDTestCase:
 
         self.stop_dnode = random.sample(only_dnode_list , 1 )[0]
         return self.stop_dnode
-        
+
     def check_vgroups_revote_leader(self,dbname):
 
         status = True
@@ -145,7 +145,7 @@ class TDTestCase:
             vgroup_status = []
             vgroups_leader_follower = vgroup_info[3:-4]
             for ind , role in enumerate(vgroups_leader_follower):
-                
+
                 if ind%2==0:
                     if role == stop_dnode_id and vgroups_leader_follower[ind+1]=="offline":
                         tdLog.notice("====== dnode {} has offline , endpoint is {}".format(stop_dnode_id , self.stop_dnode))
@@ -174,7 +174,7 @@ class TDTestCase:
                 if endpoint == self.stop_dnode:
                     status = dnode_status
                     break
-            return status 
+            return status
 
         status = _get_status()
         while status !="offline":
@@ -184,7 +184,7 @@ class TDTestCase:
         tdLog.notice("==== stop_dnode has stopped , endpoint is {}".format(self.stop_dnode))
 
     def wait_start_dnode_OK(self):
-        
+
         def _get_status():
 
             status = ""
@@ -196,7 +196,7 @@ class TDTestCase:
                 if endpoint == self.stop_dnode:
                     status = dnode_status
                     break
-            return status 
+            return status
 
         status = _get_status()
         while status !="ready":
@@ -205,8 +205,8 @@ class TDTestCase:
             # tdLog.notice("==== stop dnode has not been stopped , endpoint is {}".format(self.stop_dnode))
         tdLog.notice("==== stop_dnode has restart , endpoint is {}".format(self.stop_dnode))
-        
-    
+
+
     def random_stop_One_dnode(self):
         self.stop_dnode = self._get_stop_dnode()
         stop_dnode_id = self.dnode_list[self.stop_dnode][0]
@@ -217,7 +217,7 @@ class TDTestCase:
         # os.system("taos -s 'show dnodes;'")
 
     def Restart_stop_dnode(self):
-        
+
         tdDnodes=cluster.dnodes
         stop_dnode_id = self.dnode_list[self.stop_dnode][0]
         tdDnodes[stop_dnode_id-1].starttaosd()
@@ -225,7 +225,7 @@ class TDTestCase:
         # os.system("taos -s 'show dnodes;'")
 
     def check_vgroups_init_done(self,dbname):
-        
+
         status = True
 
         tdSql.query("show {}.vgroups".format(dbname))
@@ -233,7 +233,7 @@ class TDTestCase:
             vgroup_id = vgroup_info[0]
             vgroup_status = []
             for ind , role in enumerate(vgroup_info[3:-4]):
-                
+
                 if ind%2==0:
                     continue
                 else:
@@ -249,7 +249,7 @@ class TDTestCase:
         while not status:
             time.sleep(0.1)
             status = self.check_vgroups_init_done(dbname)
-        
+
         # tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
         end = time.time()
         cost_time = end - start
@@ -257,10 +257,10 @@ class TDTestCase:
         # os.system("taos -s 'show {}.vgroups;'".format(dbname))
         if cost_time >= self.max_vote_time_cost:
             tdLog.exit(" ==== database %s vote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
-        
+
         return cost_time
-        
+
     def revote_leader_time_costs(self,dbname):
         start = time.time()
@@ -268,7 +268,7 @@ class TDTestCase:
         while not status:
             time.sleep(0.1)
             status = self.check_vgroups_revote_leader(dbname)
-        
+
         # tdLog.notice("=== database {} show vgroups vote the leader is in progress ===".format(dbname))
         end = time.time()
         cost_time = end - start
@@ -276,10 +276,10 @@ class TDTestCase:
         # os.system("taos -s 'show {}.vgroups;'".format(dbname))
         if cost_time >= self.max_vote_time_cost:
             tdLog.exit(" ==== database %s revote the leaders cost too large time , cost time is %.3f second ===="%(dbname,cost_time) )
-        
-    
+
+
         return cost_time
-        
+
     def exec_revote_action(self,dbname):
 
         tdSql.query("show {}.vgroups".format(dbname))
@@ -296,13 +296,13 @@ class TDTestCase:
         after_vgroups = set()
         for vgroup_info in after_revote:
             after_vgroups.add(vgroup_info[3:-4])
-        
+
         vote_act = set(set(after_vgroups)-set(before_vgroups))
         if not vote_act:
             tdLog.exit(" ===maybe revote not occured , there is no dnode offline ====")
         else:
             for vgroup_info in vote_act:
-                for ind , role in enumerate(vgroup_info):  
+                for ind , role in enumerate(vgroup_info):
                     if role==self.dnode_list[self.stop_dnode][0]:
 
                         if vgroup_info[ind+1] =="offline" and "leader" in vgroup_info:
@@ -322,7 +322,7 @@ class TDTestCase:
         tdLog.notice(" ====start check time cost about vgroups vote leaders ==== ")
         tdLog.notice(" ==== current max time cost is set value : {} =======".format(self.max_vote_time_cost))
 
-        # create database replica 3 vgroups 1 
+        # create database replica 3 vgroups 1
         db1 = 'db_1'
         create_db_replica_3_vgroups_1 = "create database {} replica 3 vgroups 1".format(db1)
@@ -346,13 +346,13 @@ class TDTestCase:
         tdSql.execute(create_db_replica_3_vgroups_100)
         self.vote_leader_time_costs(db3)
         self.exec_revote_action(db3)
-        
-    
-    def run(self): 
+
+
+    def run(self):
         self.check_setup_cluster_status()
         self.test_init_vgroups_time_costs()
-        
+
 
@@ -361,4 +361,4 @@ class TDTestCase:
     tdLog.success(f"{__file__} successfully executed")
 
 tdCases.addLinux(__file__, TDTestCase())
-tdCases.addWindows(__file__, TDTestCase())
\ No newline at end of file
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py
index 70813eba9644bcb6d57fe49022a6b157cdd69528..94201a335d16eed685aa07ad7e12b1baaa0ae435 100644
--- a/tests/system-test/7-tmq/basic5.py
+++ b/tests/system-test/7-tmq/basic5.py
@@ -56,7 +56,7 @@ class TDTestCase:
         return cur
 
     def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
-        tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))  
+        tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
         tsql.execute("use %s" %dbName)
         tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
         pre_create = "create table"
@@ -69,7 +69,7 @@ class TDTestCase:
             sql = pre_create
         if sql != pre_create:
             tsql.execute(sql)
-        
+
         tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum))
         return
 
@@ -96,7 +96,7 @@ class TDTestCase:
             tsql.execute(sql)
         tdLog.debug("insert data ............ [OK]")
         return
-        
+
     def prepareEnv(self, **parameterDict):
         print ("input parameters:")
         print (parameterDict)
@@ -115,7 +115,7 @@ class TDTestCase:
                              parameterDict["ctbNum"],\
                              parameterDict["rowsPerTbl"],\
                              parameterDict["batchNum"],\
-                             parameterDict["startTs"])                     
+                             parameterDict["startTs"])
         return
 
@@ -135,34 +135,34 @@ class TDTestCase:
         prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
         prepareEnvThread.start()
         prepareEnvThread.join()
-        
+
         # wait stb ready
         while 1:
-            tdSql.query("show %s.stables"%parameterDict['dbName'])  
-            if tdSql.getRows() == 1:    
+            tdSql.query("show %s.stables"%parameterDict['dbName'])
+            if tdSql.getRows() == 1:
                 break
             else:
                 time.sleep(1)
 
         tdLog.info("create topics from super table")
         topicFromStb = 'topic_stb_column'
-        topicFromCtb = 'topic_ctb_column' 
-        
+        topicFromCtb = 'topic_ctb_column'
+
         tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
         tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
-        
+
         time.sleep(1)
         tdSql.query("show topics")
         #tdSql.checkRows(2)
         topic1 = tdSql.getData(0 , 0)
         topic2 = tdSql.getData(1 , 0)
-        
+
         tdLog.info("show topics: %s, %s"%(topic1, topic2))
         if topic1 != topicFromStb and topic1 != topicFromCtb:
-            tdLog.exit("topic error1")  
+            tdLog.exit("topic error1")
         if topic2 != topicFromStb and topic2 != topicFromCtb:
-            tdLog.exit("topic error2")  
-        
+            tdLog.exit("topic error2")
+
         tdLog.info("create consume info table and consume result table")
         cdbName = parameterDict["dbName"]
         tdSql.query("create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)")
@@ -179,7 +179,7 @@ class TDTestCase:
         sql = "insert into consumeinfo values "
         sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
         tdSql.query(sql)
-        
+
         tdLog.info("check stb if there are data")
         while 1:
             tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@@ -190,26 +190,26 @@ class TDTestCase:
                 break
             else:
                 time.sleep(1)
-        
+
         tdLog.info("start consume processor")
         pollDelay = 20
         showMsg   = 1
         showRow   = 1
-        
+
         if (platform.system().lower() == 'windows'):
             shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
-            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)  
-            shellCmd += "> nul 2>&1 &"   
+            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+            shellCmd += "> nul 2>&1 &"
         else:
             shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
-            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)  
-            shellCmd += "> /dev/null 2>&1 &"  
+            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+            shellCmd += "> /dev/null 2>&1 &"
         tdLog.info(shellCmd)
-        os.system(shellCmd)   
+        os.system(shellCmd)
 
         # wait for data ready
         # prepareEnvThread.join()
-        
+
         tdLog.info("insert process end, and start to check consume result")
         while 1:
             tdSql.query("select * from consumeresult")
@@ -229,7 +229,7 @@ class TDTestCase:
         tdSql.query("drop topic %s"%topicFromCtb)
 
         tdLog.printNoPrefix("======== test case 1 end ...... ")
-        
+
     def tmqCase2(self, cfgPath, buildPath):
         tdLog.printNoPrefix("======== test case 2: add child table with consuming ")
         # create and start thread
@@ -246,13 +246,13 @@ class TDTestCase:
         prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
         prepareEnvThread.start()
         prepareEnvThread.join()
-        
+
         # wait db ready
         while 1:
             tdSql.query("show databases")
             if tdSql.getRows() == 4:
                 print ('==================================================')
-                print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0)) 
+                print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0))
                 index = 0
                 if tdSql.getData(0,0) == parameterDict['dbName']:
                     index = 0
@@ -264,7 +264,7 @@ class TDTestCase:
                     index = 3
                 else:
                     continue
-                
+
                 if tdSql.getData(index,15) == 'ready':
                     print("******************** index: %d"%index)
                     break
@@ -272,12 +272,12 @@ class TDTestCase:
                     continue
             else:
                 time.sleep(1)
-        
+
         tdSql.query("use %s"%parameterDict['dbName'])
         # wait stb ready
         while 1:
             tdSql.query("show %s.stables"%parameterDict['dbName'])
-            if tdSql.getRows() == 1:    
+            if tdSql.getRows() == 1:
                 break
             else:
                 time.sleep(1)
@@ -285,20 +285,20 @@ class TDTestCase:
         tdLog.info("create topics from super table")
         topicFromStb = 'topic_stb_column2'
         topicFromCtb = 'topic_ctb_column2'
-        
+
         tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
         tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
-        
+
         time.sleep(1)
         tdSql.query("show topics")
         topic1 = tdSql.getData(0 , 0)
         topic2 = tdSql.getData(1 , 0)
         tdLog.info("show topics: %s, %s"%(topic1, topic2))
         if topic1 != topicFromStb and topic1 != topicFromCtb:
-            tdLog.exit("topic error1")  
+            tdLog.exit("topic error1")
         if topic2 != topicFromStb and topic2 != topicFromCtb:
-            tdLog.exit("topic error2")  
-        
+            tdLog.exit("topic error2")
+
         tdLog.info("create consume info table and consume result table")
         cdbName = parameterDict["dbName"]
         tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
@@ -316,7 +316,7 @@ class TDTestCase:
         sql = "insert into consumeinfo values "
         sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
         tdSql.query(sql)
-        
+
         tdLog.info("check stb if there are data")
         while 1:
             tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@@ -327,21 +327,21 @@ class TDTestCase:
                 break
             else:
                 time.sleep(1)
-        
+
         tdLog.info("start consume processor")
         pollDelay = 100
         showMsg   = 1
         showRow   = 1
 
         if (platform.system().lower() == 'windows'):
             shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
-            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)  
-            shellCmd += "> nul 2>&1 &"   
+            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+            shellCmd += "> nul 2>&1 &"
         else:
             shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
-            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)  
-            shellCmd += "> /dev/null 2>&1 &"  
+            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+            shellCmd += "> /dev/null 2>&1 &"
         tdLog.info(shellCmd)
-        os.system(shellCmd)   
+        os.system(shellCmd)
 
         # create new child table and insert data
         newCtbName = 'newctb'
@@ -354,7 +354,7 @@ class TDTestCase:
 
         # wait for data ready
         prepareEnvThread.join()
-        
+
         tdLog.info("insert process end, and start to check consume result")
         while 1:
             tdSql.query("select * from consumeresult")
@@ -366,7 +366,7 @@ class TDTestCase:
 
         tdSql.checkData(0 , 1, consumerId)
         tdSql.checkData(0 , 3, expectrowcnt)
-        
+
         tdSql.query("drop topic %s"%topicFromStb)
         tdSql.query("drop topic %s"%topicFromCtb)
 
@@ -390,13 +390,13 @@ class TDTestCase:
         prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
         prepareEnvThread.start()
         prepareEnvThread.join()
-        
+
         # wait db ready
         while 1:
             tdSql.query("show databases")
-            if tdSql.getRows() == 5:    
+            if tdSql.getRows() == 5:
                 print ('==================================================dbname: %s'%parameterDict['dbName'])
-                print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),tdSql.getData(3,0),tdSql.getData(4,0)) 
+                print (tdSql.getData(0,0), tdSql.getData(1,0),tdSql.getData(2,0),tdSql.getData(3,0),tdSql.getData(4,0))
                 index = 0
                 if tdSql.getData(0,0) == parameterDict['dbName']:
                     index = 0
@@ -409,8 +409,8 @@ class TDTestCase:
                 elif tdSql.getData(4,0) == parameterDict['dbName']:
                     index = 4
                 else:
-                    continue    
-                
+                    continue
+
                 if tdSql.getData(index,15) == 'ready':
                     print("******************** index: %d"%index)
                     break
@@ -418,16 +418,16 @@ class TDTestCase:
                     continue
             else:
                 time.sleep(1)
-        
+
         tdSql.query("use %s"%parameterDict['dbName'])
         # wait stb ready
         while 1:
             tdSql.query("show %s.stables"%parameterDict['dbName'])
-            if tdSql.getRows() == 1:    
+            if tdSql.getRows() == 1:
                 break
             else:
                 time.sleep(1)
-        
+
         tdLog.info("create stable2 for the seconde topic")
         parameterDict2 = {'cfg':        '',    \
                           'dbName':     'db3', \
@@ -439,23 +439,23 @@ class TDTestCase:
                           'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
         parameterDict2['cfg'] = cfgPath
         tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName']))
-        
+
         tdLog.info("create topics from super table")
         topicFromStb = 'topic_stb_column3'
         topicFromStb2 = 'topic_stb_column32'
-        
+
         tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
         tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict2['dbName'], parameterDict2['stbName']))
-        
+
         tdSql.query("show topics")
         topic1 = tdSql.getData(0 , 0)
         topic2 = tdSql.getData(1 , 0)
         tdLog.info("show topics: %s, %s"%(topic1, topic2))
         if topic1 != topicFromStb and topic1 != topicFromStb2:
-            tdLog.exit("topic error1")  
+            tdLog.exit("topic error1")
         if topic2 != topicFromStb and topic2 != topicFromStb2:
-            tdLog.exit("topic error2")  
-        
+            tdLog.exit("topic error2")
+
         tdLog.info("create consume info table and consume result table")
         cdbName = parameterDict["dbName"]
         tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
@@ -472,7 +472,7 @@ class TDTestCase:
         sql = "insert into consumeinfo values "
         sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
         tdSql.query(sql)
-        
+
         tdLog.info("check stb if there are data")
         while 1:
             tdSql.query("select count(*) from %s"%parameterDict["stbName"])
@@ -483,22 +483,22 @@ class TDTestCase:
                 break
             else:
                 time.sleep(1)
-        
+
         tdLog.info("start consume processor")
         pollDelay = 100
         showMsg   = 1
         showRow   = 1
-        
+
         if (platform.system().lower() == 'windows'):
             shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
-            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)  
-            shellCmd += "> nul 2>&1 &"   
+            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+            shellCmd += "> nul 2>&1 &"
        else:
             shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
-            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)  
-            shellCmd += "> /dev/null 2>&1 &"  
+            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, parameterDict["dbName"], showMsg, showRow, cdbName)
+            shellCmd += "> /dev/null 2>&1 &"
         tdLog.info(shellCmd)
-        os.system(shellCmd)   
+        os.system(shellCmd)
 
         # start the second thread to create new child table and insert data
         prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
@@ -507,7 +507,7 @@ class TDTestCase:
         # wait for data ready
         prepareEnvThread.join()
         prepareEnvThread2.join()
-        
+
         tdLog.info("insert process end, and start to check consume result")
         while 1:
             tdSql.query("select * from consumeresult")
@@ -519,7 +519,7 @@ class TDTestCase:
 
         tdSql.checkData(0 , 1, consumerId)
         tdSql.checkData(0 , 3, expectrowcnt)
-        
+
         tdSql.query("drop topic %s"%topicFromStb)
         tdSql.query("drop topic %s"%topicFromStb2)
 
@@ -537,7 +537,7 @@ class TDTestCase:
             tdLog.info("cfgPath: %s" % cfgPath)
 
         self.tmqCase1(cfgPath, buildPath)
-        self.tmqCase2(cfgPath, buildPath) 
+        self.tmqCase2(cfgPath, buildPath)
         self.tmqCase3(cfgPath, buildPath)
 
     def stop(self):
diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
index 22160002145fa5b96e5ab5651deb35a28eee2f32..4add73ec2b7fb804d05c9ef45d6563f1c9edfe1b 100644
--- a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
+++ b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
@@ -20,7 +20,7 @@ class TDTestCase:
         self.vgroups    = 4
         self.ctbNum     = 100
         self.rowsPerTbl = 1000
-        
+
     def init(self, conn, logSql):
         tdLog.debug(f"start to excute {__file__}")
         tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl
-        
+
         tmqCom.initConsumerTable()
         tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
         tdLog.info("create stb")
@@ -58,12 +58,12 @@ class TDTestCase:
         tdLog.info("create ctb")
         tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
                              ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-        
+
         tdLog.info("insert data")
         tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
                                                ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
                                                startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-        
+
         tdLog.info("flush db to let data falls into the disk")
         tdSql.query("flush database %s"%(paraDict['dbName']))
         return
@@ -93,18 +93,18 @@ class TDTestCase:
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl
-        
+
         topicNameList = ['topic1']
         expectRowsList = []
         tmqCom.initConsumerTable()
-        
+
         tdLog.info("create topics from stb with filter")
         queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
         # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
         sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
         tdLog.info("create topic sql: %s"%sqlString)
         tdSql.execute(sqlString)
-        # tdSql.query(queryString)        
+        # tdSql.query(queryString)
         # expectRowsList.append(tdSql.getRows())
 
         # init consume info, and start tmq_sim, then check consume result
@@ -121,29 +121,29 @@ class TDTestCase:
         paraDict['batchNum'] = 100
         paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
         pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
-        
+
         tdLog.info("start consume processor")
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'],
                                   showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
 
         pInsertThread.join()
-        
-        tdSql.query(queryString)        
+
+        tdSql.query(queryString)
         expectRowsList.append(tdSql.getRows())
-        
-        tdLog.info("wait the consume result")    
+
+        tdLog.info("wait the consume result")
         expectRows = 1
         resultList = tmqCom.selectConsumeResult(expectRows)
-        
+
         tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
         if expectRowsList[0] != resultList[0]:
             tdLog.exit("%d tmq consume rows error!"%consumerId)
 
-        # tmqCom.checkFileContent(consumerId, queryString)        
+        # tmqCom.checkFileContent(consumerId, queryString)
 
         tdSql.query("flush database %s"%(paraDict['dbName']))
-        
+
         for i in range(len(topicNameList)):
-            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])  
+            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
             tdSql.query("drop topic %s"%topicNameList[i])
 
         tdLog.printNoPrefix("======== test case 1 end ...... ")
@@ -173,18 +173,18 @@ class TDTestCase:
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl
-        
+
         topicNameList = ['topic1']
         expectRowsList = []
         tmqCom.initConsumerTable()
-        
+
         tdLog.info("create topics from stb with filter")
         queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
         # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
         sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
         tdLog.info("create topic sql: %s"%sqlString)
         tdSql.execute(sqlString)
-        tdSql.query(queryString)        
+        tdSql.query(queryString)
         expectRowsList.append(tdSql.getRows())
         totalRowsInserted = expectRowsList[0]
 
@@ -200,36 +200,36 @@ class TDTestCase:
         tdLog.info("start consume processor 0")
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'],
                                   showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-        tdLog.info("wait the consume result")  
-        
+        tdLog.info("wait the consume result")
+
         expectRows = 1
         resultList = tmqCom.selectConsumeResult(expectRows)
         actConsumeRows = resultList[0]
-        
+
         tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
         if not (expectrowcnt <= actConsumeRows and totalRowsInserted >= actConsumeRows):
             tdLog.exit("%d tmq consume rows error!"%consumerId)
-        
+
         # reinit consume info, and start tmq_sim, then check consume result
         tmqCom.initConsumerTable()
         consumerId   = 2
         expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
         tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-        
+
         tdLog.info("start consume processor 1")
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'],
                                   showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-        tdLog.info("wait the consume result")  
-        
+        tdLog.info("wait the consume result")
+
         expectRows = 1
         resultList = tmqCom.selectConsumeResult(expectRows)
-        
+
         actConsumeRows = resultList[0]
-        tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))        
+        tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
         if not ((actConsumeRows >= expectrowcnt) and (totalRowsInserted > actConsumeRows)):
             tdLog.exit("%d tmq consume rows error!"%consumerId)
-        
+
         for i in range(len(topicNameList)):
-            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])  
+            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
             tdSql.query("drop topic %s"%topicNameList[i])
 
         tdLog.printNoPrefix("======== test case 2 end ...... ")
diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal.py b/tests/system-test/7-tmq/dataFromTsdbNWal.py
index faa70f482077d66cff9861eaf313a0f3698f5c96..950c8fdcf6218fb6a0cc54573009bef41ffc0ffd 100644
--- a/tests/system-test/7-tmq/dataFromTsdbNWal.py
+++ b/tests/system-test/7-tmq/dataFromTsdbNWal.py
@@ -20,7 +20,7 @@ class TDTestCase:
         self.vgroups    = 4
         self.ctbNum     = 1
         self.rowsPerTbl = 10000
-        
+
     def init(self, conn, logSql):
         tdLog.debug(f"start to excute {__file__}")
         tdSql.init(conn.cursor(), False)
@@ -50,7 +50,7 @@ class TDTestCase:
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl
-        
+
         tmqCom.initConsumerTable()
         tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
         tdLog.info("create stb")
@@ -58,12 +58,12 @@ class TDTestCase:
         tdLog.info("create ctb")
         tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
                              ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
-        
+
         tdLog.info("insert data")
         tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
                                                ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
                                                startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
-        
+
         tdLog.info("flush db to let data falls into the disk")
         tdSql.query("flush database %s"%(paraDict['dbName']))
         return
@@ -93,18 +93,18 @@ class TDTestCase:
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl
-        
+
         topicNameList = ['topic1']
         expectRowsList = []
         tmqCom.initConsumerTable()
-        
+
         tdLog.info("create topics from stb with filter")
         queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
         # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
         sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
         tdLog.info("create topic sql: %s"%sqlString)
         tdSql.execute(sqlString)
-        # tdSql.query(queryString)        
+        # tdSql.query(queryString)
         # expectRowsList.append(tdSql.getRows())
 
         # init consume info, and start tmq_sim, then check consume result
@@ -121,29 +121,29 @@ class TDTestCase:
         paraDict['batchNum'] = 100
         paraDict['startTs'] = paraDict['startTs'] + self.rowsPerTbl
         pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict)
-        
+
         tdLog.info("start consume processor")
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'],
                                   showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
 
         pInsertThread.join()
-        
-        tdSql.query(queryString)        
+
+        tdSql.query(queryString)
         expectRowsList.append(tdSql.getRows())
-        
-        tdLog.info("wait the consume result")    
+
+        tdLog.info("wait the consume result")
         expectRows = 1
         resultList = tmqCom.selectConsumeResult(expectRows)
-        
+
         tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
         if expectRowsList[0] != resultList[0]:
             tdLog.exit("%d tmq consume rows error!"%consumerId)
 
-        tmqCom.checkFileContent(consumerId, queryString)        
+        tmqCom.checkFileContent(consumerId, queryString)
 
         tdSql.query("flush database %s"%(paraDict['dbName']))
 
         for i in range(len(topicNameList)):
-            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])  
+            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
             tdSql.query("drop topic %s"%topicNameList[i])
 
         tdLog.printNoPrefix("======== test case 1 end ...... ")
@@ -173,18 +173,18 @@ class TDTestCase:
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl
-        
+
         topicNameList = ['topic1']
         expectRowsList = []
         tmqCom.initConsumerTable()
-        
+
         tdLog.info("create topics from stb with filter")
         queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName'])
         # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName'])
         sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
         tdLog.info("create topic sql: %s"%sqlString)
         tdSql.execute(sqlString)
-        tdSql.query(queryString)        
+        tdSql.query(queryString)
         expectRowsList.append(tdSql.getRows())
         totalRowsInserted = expectRowsList[0]
 
@@ -200,36 +200,36 @@ class TDTestCase:
         tdLog.info("start consume processor 0")
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'],
                                   showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-        tdLog.info("wait the consume result")  
-        
+        tdLog.info("wait the consume result")
+
         expectRows = 1
         resultList = tmqCom.selectConsumeResult(expectRows)
         actConsumeRows = resultList[0]
-        
+
         tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
         if not (expectrowcnt <= actConsumeRows and totalRowsInserted >= actConsumeRows):
             tdLog.exit("%d tmq consume rows error!"%consumerId)
-        
+
         # reinit consume info, and start tmq_sim, then check consume result
         tmqCom.initConsumerTable()
         consumerId   = 2
         expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3)
         tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
-        
+
         tdLog.info("start consume processor 1")
         tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'],
                                   showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
-        tdLog.info("wait the consume result")  
-        
+        tdLog.info("wait the consume result")
+
         expectRows = 1
         resultList = tmqCom.selectConsumeResult(expectRows)
-        
+
         actConsumeRows = resultList[0]
-        tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))        
+        tdLog.info("act consume rows: %d, expect rows: %d, act insert rows: %d"%(actConsumeRows, expectrowcnt, totalRowsInserted))
         if not ((actConsumeRows >= expectrowcnt) and (totalRowsInserted > actConsumeRows)):
             tdLog.exit("%d tmq consume rows error!"%consumerId)
 
         for i in range(len(topicNameList)):
-            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])  
+            tmqCom.waitSubscriptionExit(tdSql,topicNameList[i])
             tdSql.query("drop topic %s"%topicNameList[i])
 
         tdLog.printNoPrefix("======== test case 2 end ...... ")
diff --git a/tests/system-test/7-tmq/db.py b/tests/system-test/7-tmq/db.py
index 1fd0638d172fe6dfb6f19c1617e1eebc38ccac41..da5d7fefd2b2ca2f06ceaf81d6ceb116e8bbdfcd 100644
--- a/tests/system-test/7-tmq/db.py
+++ b/tests/system-test/7-tmq/db.py
@@ -56,12 +56,12 @@ class TDTestCase:
         print(cur)
         return cur
 
-    def initConsumerTable(self,cdbName='cdb'):        
+    def initConsumerTable(self,cdbName='cdb'):
         tdLog.info("create consume database, and consume info table, and consume result table")
         tdSql.query("drop database if exists %s "%(cdbName))
         tdSql.query("create database %s vgroups 1"%(cdbName))
         tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
-        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))        
+        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
 
         tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
         tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
@@ -75,7 +75,7 @@ class TDTestCase:
         tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
         tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
 
-    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):    
+    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
         sql = "insert into %s.consumeinfo values "%cdbName
         sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
         tdLog.info("consume info sql: %s"%sql)
@@ -90,11 +90,11 @@ class TDTestCase:
                 break
             else:
                 time.sleep(5)
-        
+
         for i in range(expectRows):
             tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
             resultList.append(tdSql.getData(i , 3))
-        
+
         return resultList
 
     def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -102,14 +102,14 @@ class TDTestCase:
             logFile = cfgPath + '/../log/valgrind-tmq.log'
             shellCmd = 'nohup valgrind --log-file=' + logFile
             shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-        
+
         if (platform.system().lower() == 'windows'):
             shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath
-            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)  
-            shellCmd += "> nul 2>&1 &"   
+            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
+            shellCmd += "> nul 2>&1 &"
         else:
             shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath
-            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)  
+            shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
             shellCmd += "> /dev/null 2>&1 &"
         tdLog.info(shellCmd)
         os.system(shellCmd)
@@ -139,7 +139,7 @@ class TDTestCase:
             sql = pre_create
         if sql != pre_create:
             tsql.execute(sql)
-        
+
         tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
         return
 
@@ -158,7 +158,7 @@ class TDTestCase:
             ctbDict[i] = 0
 
         #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
-        rowsOfCtb = 0        
+        rowsOfCtb = 0
         while rowsOfCtb < rowsPerTbl:
             for i in range(ctbNum):
                 sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@@ -185,7 +185,7 @@ class TDTestCase:
         startTs = int(round(t * 1000))
 
         #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
-        rowsOfSql = 0        
+        rowsOfSql = 0
         for i in range(ctbNum):
             sql += " %s_%d values "%(ctbPrefix,i)
             for j in range(rowsPerTbl):
@@ -216,7 +216,7 @@ class TDTestCase:
         startTs = int(round(t * 1000))
 
         #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
-        rowsOfSql = 0        
+        rowsOfSql = 0
         for i in range(ctbNum):
             sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
             for j in range(rowsPerTbl):
@@ -235,8 +235,8 @@ class TDTestCase:
             tsql.execute(sql)
         tdLog.debug("insert data ............ [OK]")
         return
-        
-    def prepareEnv(self, **parameterDict):    
+
+    def prepareEnv(self, **parameterDict):
         # create new connector for my thread
         tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -255,7 +255,7 @@ class TDTestCase:
         return
 
     def tmqCase1(self, cfgPath, buildPath):
-        tdLog.printNoPrefix("======== test case 1: ")  
+        tdLog.printNoPrefix("======== test case 1: ")
         '''
         subscribe one db, multi normal table which have not same schema, and include rows of all tables in one insert sql
         '''
@@ -274,11 +274,11 @@ class TDTestCase:
                          'batchNum':   100,   \
                          'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
         parameterDict['cfg'] = cfgPath
-        
+
         self.create_database(tdSql, parameterDict["dbName"])
         tdSql.execute("create table %s.ntb0 (ts timestamp, c1 int)"%(parameterDict["dbName"]))
         tdSql.execute("create table %s.ntb1 (ts timestamp, c1 int, c2 float)"%(parameterDict["dbName"]))
-        tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(parameterDict["dbName"]))        
+        tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(parameterDict["dbName"]))
         tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(parameterDict["dbName"]))
 
         tdSql.execute("insert into %s.ntb0 values(now, 1) %s.ntb1 values(now, 1, 1) %s.ntb2 values(now, 1, 1, '1') %s.ntb3 values(now, 1, 1, '1', now)"%(parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"],parameterDict["dbName"]))
@@ -301,7 +301,7 @@ class TDTestCase:
 
         tdLog.info("create topics from db")
         topicFromDb = 'topic_db_mulit_tbl'
-        
+
         tdSql.execute("create topic %s as database %s" %(topicFromDb, parameterDict['dbName']))
         consumerId   = 0
         expectrowcnt = numOfNtb * rowsOfPerNtb
@@ -324,7 +324,7 @@ class TDTestCase:
         totalConsumeRows = 0
         for i in range(expectRows):
             totalConsumeRows += resultList[i]
-        
+
         if totalConsumeRows != expectrowcnt:
             tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
             tdLog.exit("tmq consume rows error!")
@@ -334,7 +334,7 @@ class TDTestCase:
         tdLog.printNoPrefix("======== test case 1 end ...... ")
 
     def tmqCase2(self, cfgPath, buildPath):
-        tdLog.printNoPrefix("======== test case 2: ")  
+        tdLog.printNoPrefix("======== test case 2: ")
         '''
         subscribe one stb, multi child talbe and normal table which have not same schema, and include rows of all tables in one insert sql
         '''
@@ -355,7 +355,7 @@ class TDTestCase:
         parameterDict['cfg'] = cfgPath
 
         dbName = parameterDict["dbName"]
-        
+
         self.create_database(tdSql, dbName)
 
         tdSql.execute("create stable %s.stb (ts timestamp, s1 bigint, s2 binary(32), s3 double) tags (t1 int, t2 binary(32))"%(dbName))
@@ -364,7 +364,7 @@ class TDTestCase:
 
         tdSql.execute("create table %s.ntb0 (ts timestamp, c1 binary(32))"%(dbName))
         tdSql.execute("create table %s.ntb1 (ts timestamp, c1 binary(32), c2 float)"%(dbName))
-        tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(dbName))        
+        tdSql.execute("create table %s.ntb2 (ts timestamp, c1 int, c2 float, c3 binary(32))"%(dbName))
         tdSql.execute("create table %s.ntb3 (ts timestamp, c1 int, c2 float, c3 binary(32), c4 timestamp)"%(dbName))
 
         tdSql.execute("insert into %s.ntb0 values(now, 'ntb0-11') \
@@ -401,7 +401,7 @@ class TDTestCase:
 
         tdLog.info("create topics from db")
         topicFromStb = 'topic_stb_mulit_tbl'
-        
+
         tdSql.execute("create topic %s as stable %s.stb" %(topicFromStb, dbName))
         consumerId   = 0
         expectrowcnt = numOfCtb * rowsOfPerNtb
@@ -424,7 +424,7 @@ class TDTestCase:
         totalConsumeRows = 0
         for i in range(expectRows):
             totalConsumeRows += resultList[i]
-        
+
         if totalConsumeRows != expectrowcnt:
             tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
             tdLog.exit("tmq consume rows error!")
@@ -445,7 +445,7 @@ class TDTestCase:
             tdLog.info("cfgPath: %s" % cfgPath)
 
         self.tmqCase1(cfgPath, buildPath)
-        self.tmqCase2(cfgPath, buildPath) 
+        self.tmqCase2(cfgPath, buildPath)
 
     def stop(self):
         tdSql.close()
diff --git a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
index 4dac872fdea51f284c3edde04db7416721a70936..fc4fdcecf9f31dc341f1a26d49b4d7efda5deae3 100644
--- a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
+++ b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
@@ -38,20 +38,20 @@ class TDTestCase:
         cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
         tdLog.info(cmdStr)
         os.system(cmdStr)
-        
+
         consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
         tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
 
         consumeFile = open(consumeRowsFile, mode='r')
         queryFile = open(dstFile, mode='r')
-        
+
         # skip first line for it is schema
         queryFile.readline()
 
         while True:
             dst = queryFile.readline()
             src = consumeFile.readline()
-            
+
             if dst:
                 if dst != src:
                     tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
@@ -84,7 +84,7 @@ class TDTestCase:
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl
-        
+
         tmqCom.initConsumerTable()
         tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replica)
         tdLog.info("create stb")
@@ -101,13 +101,13 @@ class TDTestCase:
         #                                        startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
         # tmqCom.asyncInsertDataByInterlace(paraDict)
         tmqCom.create_ntable(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=1)
-        tmqCom.insert_rows_into_ntbl(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"], startTs=paraDict["startTs"], tblNum=1, rows=2) # tdLog.info("restart taosd to ensure that the data falls into the disk")  
+        tmqCom.insert_rows_into_ntbl(tdSql, dbname=paraDict["dbName"], tbname_prefix="ntb", tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"], startTs=paraDict["startTs"], tblNum=1, rows=2) # tdLog.info("restart taosd to ensure that the data falls into the disk")
         tdSql.query("drop database %s"%paraDict["dbName"])
         return
 
-    def tmqCase1(self):       
-        tdLog.printNoPrefix("======== test case 1: ")   
-        
+    def tmqCase1(self):
+        tdLog.printNoPrefix("======== test case 1: ")
+
         # create and start thread
         paraDict = {'dbName':     'dbt',
                     'dropFlag':   1,
@@ -132,14 +132,14 @@ class TDTestCase:
         paraDict['vgroups'] = self.vgroups
         paraDict['ctbNum'] = self.ctbNum
         paraDict['rowsPerTbl'] = self.rowsPerTbl
-        
+
         tdLog.info("create topics from stb1")
-        topicFromStb1 = 'topic_stb1'  
+        topicFromStb1 = 'topic_stb1'
         queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
         sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
         tdLog.info("create topic sql: %s"%sqlString)
         tdSql.execute(sqlString)
-        
+
         consumerId     = 0
         expectrowcnt   = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
         topicList      = topicFromStb1
@@ -166,13 +166,13 @@ class TDTestCase:
 
         tdSql.query(queryString)
         totalRowsInserted = tdSql.getRows()
-        
+
         tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt))
-        
+
         if totalConsumeRows != expectrowcnt:
             tdLog.exit("tmq consume rows error!")
-          
-        # tmqCom.checkFileContent(consumerId, queryString)   
+
+        # tmqCom.checkFileContent(consumerId, queryString)
 
         tmqCom.waitSubscriptionExit(tdSql, topicFromStb1)
         tdSql.query("drop topic %s"%topicFromStb1)
diff --git a/tests/system-test/7-tmq/schema.py b/tests/system-test/7-tmq/schema.py
index 699c252c3181f2988b00da7a609a7b006f2a7ee5..34d36e57927ca8605b11deeac7a1c4165bbf9905 100644
--- a/tests/system-test/7-tmq/schema.py
+++ b/tests/system-test/7-tmq/schema.py
@@ -56,12 +56,12 @@ class TDTestCase:
         print(cur)
         return cur
 
-    def initConsumerTable(self,cdbName='cdb'):        
+    def initConsumerTable(self,cdbName='cdb'):
         tdLog.info("create consume database, and consume info table, and consume result table")
         tdSql.query("drop database if exists %s "%(cdbName))
         tdSql.query("create database %s vgroups 1"%(cdbName))
         tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
-        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))        
+        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))
 
         tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
         tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)
@@ -75,7 +75,7 @@ class TDTestCase:
         tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
         tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
 
-    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):    
+    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
         sql = "insert into %s.consumeinfo values "%cdbName
         sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
         tdLog.info("consume info sql: %s"%sql)
@@ -90,11 +90,11 @@ class TDTestCase:
                 break
             else:
                 time.sleep(5)
-        
+
         for i in range(expectRows):
             tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
             resultList.append(tdSql.getData(i , 3))
-        
+
         return resultList
 
     def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
@@ -103,9 +103,9 @@ class TDTestCase:
             logFile = cfgPath + '/../log/valgrind-tmq.log'
             shellCmd = 'nohup valgrind --log-file=' + logFile
             shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes '
-        
+
         shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath
-        shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)  
+        shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName)
         shellCmd += "> /dev/null 2>&1 &"
         tdLog.info(shellCmd)
         os.system(shellCmd)
@@ -135,7 +135,7 @@ class TDTestCase:
             sql = pre_create
         if sql != pre_create:
             tsql.execute(sql)
-        
+
         tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
         return
 
@@ -154,7 +154,7 @@ class TDTestCase:
            ctbDict[i] = 0
 
         #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
-        rowsOfCtb = 0        
+        rowsOfCtb = 0
         while rowsOfCtb < rowsPerTbl:
             for i in range(ctbNum):
                 sql += " %s.%s_%d values "%(dbName,ctbPrefix,i)
@@ -181,7 +181,7 @@ class TDTestCase:
         startTs = int(round(t * 1000))
 
         #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
-        rowsOfSql = 0        
+        rowsOfSql = 0
         for i in range(ctbNum):
             sql += " %s_%d values "%(ctbPrefix,i)
             for j in range(rowsPerTbl):
@@ -212,7 +212,7 @@ class TDTestCase:
         startTs = int(round(t * 1000))
 
         #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
-        rowsOfSql = 0        
+        rowsOfSql = 0
         for i in range(ctbNum):
             sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i)
             for j in range(rowsPerTbl):
@@ -231,8 +231,8 @@ class TDTestCase:
             tsql.execute(sql)
         tdLog.debug("insert data ............ [OK]")
         return
-        
-    def prepareEnv(self, **parameterDict):    
+
+    def prepareEnv(self, **parameterDict):
         # create new connector for my thread
         tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030)
@@ -265,7 +265,7 @@ class TDTestCase:
                          'batchNum':   23,    \
                          'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
         parameterDict['cfg'] = cfgPath
-        
+
         tdLog.info("create database, super table, child table, normal table")
         ntbName = 'ntb1'
         self.create_database(tdSql, parameterDict["dbName"])
@@ -278,10 +278,10 @@ class TDTestCase:
         tdLog.info("create topics from super table and normal table")
         columnTopicFromStb = 'column_topic_from_stb1'
         columnTopicFromNtb = 'column_topic_from_ntb1'
-        
+
         tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
         tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
-        
+
         tdLog.info("======== super table test:")
         # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
         tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -341,12 +341,12 @@ class TDTestCase:
         tdLog.info("======== child table test:")
         parameterDict['stbName'] = 'stb12'
         ctbName = 'stb12_0'
-        tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))        
+        tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
         tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
 
         tdLog.info("create topics from child table")
         columnTopicFromCtb = 'column_topic_from_ctb1'
-        
+
         tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
 
         # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
@@ -388,7 +388,7 @@ class TDTestCase:
         tdSql.query("alter table %s.%s add column c4 float"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s add tag t3 int"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s add tag t4 float"%(parameterDict['dbName'], parameterDict['stbName']))
-        
+
         tdLog.printNoPrefix("======== test case 1 end ...... ")
 
     def tmqCase2(self, cfgPath, buildPath):
@@ -406,7 +406,7 @@ class TDTestCase:
                          'batchNum':   23,    \
                          'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
         parameterDict['cfg'] = cfgPath
-        
+
         tdLog.info("create database, super table, child table, normal table")
         self.create_database(tdSql, parameterDict["dbName"])
         ntbName = 'ntb2'
@@ -416,18 +416,18 @@ class TDTestCase:
         tdLog.info("create topics from super table and normal table")
         columnTopicFromStb = 'column_topic_from_stb2'
         columnTopicFromNtb = 'column_topic_from_ntb2'
-        
+
         tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
         tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s where c3 > 3 and c4 like 'abc'" %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
-        
+
         tdLog.info("======== super table test:")
         # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
         tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
-        tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))   
+        tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
-        tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))        
+        tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
 
@@ -485,12 +485,12 @@ class TDTestCase:
         tdLog.info("======== child table test:")
         parameterDict['stbName'] = 'stb21'
         ctbName = 'stb21_0'
-        tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))        
+        tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
         tdSql.query("create table %s.%s using %s.%s tags (1, '2', 3, '4', '5')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
 
         tdLog.info("create topics from child table")
         columnTopicFromCtb = 'column_topic_from_ctb2'
-        
+
         tdSql.execute("create topic %s as select ts, c1, c2, t1, t2 from %s.%s where c3 > 3 and c4 like 'abc' and t3 = 5 and t4 = 'beijing'" %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
 
         # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
@@ -536,11 +536,11 @@ class TDTestCase:
 
         tdSql.query("alter table %s.%s add column c5 float"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s add tag t5 float"%(parameterDict['dbName'], parameterDict['stbName']))
-        
+
         tdLog.printNoPrefix("======== test case 2 end ...... ")
 
     def tmqCase3(self, cfgPath, buildPath):
-        tdLog.printNoPrefix("======== test case 3: ")  
+        tdLog.printNoPrefix("======== test case 3: ")
         parameterDict = {'cfg':        '',    \
                          'actionType': 0,     \
                          'dbName':     'db3', \
@@ -554,7 +554,7 @@ class TDTestCase:
                         'batchNum':   23,    \
                          'startTs':    1640966400000}  # 2022-01-01 00:00:00.000
         parameterDict['cfg'] = cfgPath
-        
+
         tdLog.info("create database, super table, child table, normal table")
         self.create_database(tdSql, parameterDict["dbName"])
         ntbName = 'ntb3'
@@ -564,19 +564,19 @@ class TDTestCase:
         tdLog.info("create topics from super table and normal table")
         columnTopicFromStb = 'star_topic_from_stb3'
         columnTopicFromNtb = 'star_topic_from_ntb3'
-        
+
         tdSql.execute("create topic %s as select * from %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
         tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromNtb, parameterDict['dbName'], ntbName))
-        
+
         tdLog.info("======== super table test:")
         # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
         tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
-        tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))   
-        tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName'])) 
+        tdSql.error("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.error("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop tag t1"%(parameterDict['dbName'], parameterDict['stbName']))
-        tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))        
+        tdSql.error("alter table %s.%s drop tag t2"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop tag t3"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop tag t4"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop tag t5"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -627,12 +627,12 @@ class TDTestCase:
         tdLog.info("======== child table test:")
         parameterDict['stbName'] = 'stb31'
         ctbName = 'stb31_0'
-        tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))        
+        tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName']))
         tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName']))
 
         tdLog.info("create topics from child table")
         columnTopicFromCtb = 'column_topic_from_ctb3'
-        
+
         tdSql.execute("create topic %s as select * from %s.%s " %(columnTopicFromCtb,parameterDict['dbName'],ctbName))
 
         tdSql.error("alter table %s.%s modify column c2 binary(40)"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -647,7 +647,7 @@ class TDTestCase:
         tdSql.query("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName))
         tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName))
         tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName))
-        
+
         tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -662,7 +662,7 @@ class TDTestCase:
         # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic
         tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName']))
-        
+
         # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic
         tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
@@ -679,7 +679,7 @@ class TDTestCase:
         tdLog.printNoPrefix("======== test case 3 end ...... ")
 
     def tmqCase4(self, cfgPath, buildPath):
-        tdLog.printNoPrefix("======== test case 4: ")  
+        tdLog.printNoPrefix("======== test case 4: ")
         parameterDict = {'cfg':        '',    \
                          'actionType': 0,     \
                          'dbName':     'db4', \
@@ -695,7 +695,7 @@ class TDTestCase:
         parameterDict['cfg'] = cfgPath
 
         ctbName = 'stb4_0'
-        
+
         tdLog.info("create database, super table, child table, normal table")
         self.create_database(tdSql, parameterDict["dbName"])
         tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"]))
@@ -703,7 +703,7 @@ class TDTestCase:
 
         tdLog.info("create topics from super table")
         columnTopicFromStb = 'star_topic_from_stb4'
-        
+
         tdSql.execute("create topic %s as stable %s.%s" %(columnTopicFromStb, parameterDict['dbName'], parameterDict['stbName']))
 
         tdLog.info("======== child table test:")
@@ -739,10 +739,10 @@ class TDTestCase:
         tdSql.query("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName']))
-        tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))   
-        tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName'])) 
+        tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName']))
+        tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s drop tag t1new"%(parameterDict['dbName'], parameterDict['stbName']))
-        tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))        
+        tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName']))
         tdSql.query("alter table %s.%s drop tag 
t5new"%(parameterDict['dbName'], parameterDict['stbName'])) @@ -750,7 +750,7 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 4 end ...... ") def tmqCase5(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 5: ") + tdLog.printNoPrefix("======== test case 5: ") parameterDict = {'cfg': '', \ 'actionType': 0, \ 'dbName': 'db5', \ @@ -766,7 +766,7 @@ class TDTestCase: parameterDict['cfg'] = cfgPath ctbName = 'stb5_0' - + tdLog.info("create database, super table, child table, normal table") self.create_database(tdSql, parameterDict["dbName"]) tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict["stbName"])) @@ -774,7 +774,7 @@ class TDTestCase: tdLog.info("create topics from super table") columnTopicFromStb = 'star_topic_from_db5' - + tdSql.execute("create topic %s as database %s" %(columnTopicFromStb, parameterDict['dbName'])) tdLog.info("======== child table test:") @@ -810,10 +810,10 @@ class TDTestCase: tdSql.query("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.query("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.query("alter table %s.%s drop column c3"%(parameterDict['dbName'], parameterDict['stbName'])) - tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName'])) - tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s drop column c4"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s drop column c5"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.query("alter table %s.%s drop tag t1new"%(parameterDict['dbName'], parameterDict['stbName'])) - tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName'])) + tdSql.query("alter table %s.%s drop tag t2new"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.query("alter table %s.%s drop tag t3new"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.query("alter table %s.%s drop tag t4new"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.query("alter table %s.%s drop tag t5new"%(parameterDict['dbName'], parameterDict['stbName'])) @@ -821,7 +821,7 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 5 end ...... 
") def tmqCase6(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 6: ") + tdLog.printNoPrefix("======== test case 6: ") parameterDict = {'cfg': '', \ 'actionType': 0, \ 'dbName': 'db6', \ @@ -835,18 +835,18 @@ class TDTestCase: 'batchNum': 23, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + tdLog.info("create database, super table, child table, normal table") self.create_database(tdSql, parameterDict["dbName"]) tdLog.info("======== child table test:") parameterDict['stbName'] = 'stb6' ctbName = 'stb6_0' - tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName'])) + tdSql.query("create table %s.%s (ts timestamp, c1 int, c2 binary(32), c3 double, c4 binary(32), c5 nchar(10)) tags (t1 int, t2 binary(32), t3 double, t4 binary(32), t5 nchar(10))"%(parameterDict["dbName"],parameterDict['stbName'])) tdSql.query("create table %s.%s using %s.%s tags (10, '10', 10, '10', '10')"%(parameterDict["dbName"],ctbName,parameterDict["dbName"],parameterDict['stbName'])) tdLog.info("create topics from child table") columnTopicFromCtb = 'column_topic_from_ctb6' - + tdSql.execute("create topic %s as select c1, c2, c3 from %s.%s where t1 > 10 and t2 = 'beijign' and sin(t3) < 0" %(columnTopicFromCtb,parameterDict['dbName'],ctbName)) tdSql.error("alter table %s.%s modify column c1 binary(40)"%(parameterDict['dbName'], parameterDict['stbName'])) @@ -861,7 +861,7 @@ class TDTestCase: tdSql.error("alter table %s.%s set tag t3=20"%(parameterDict['dbName'], ctbName)) tdSql.query("alter table %s.%s set tag t4='20'"%(parameterDict['dbName'], ctbName)) tdSql.query("alter table %s.%s set tag t5='20'"%(parameterDict['dbName'], ctbName)) - + tdSql.error("alter table %s.%s rename column c1 c1new"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.error("alter table %s.%s rename column c2 c2new"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.error("alter table %s.%s rename column c3 c3new"%(parameterDict['dbName'], parameterDict['stbName'])) @@ -876,7 +876,7 @@ class TDTestCase: # alter actions allowed: drop column/tag, modify column/tag type, rename column/tag not included in topic tdSql.query("alter table %s.%s add column c6 float"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.query("alter table %s.%s add tag t6 float"%(parameterDict['dbName'], parameterDict['stbName'])) - + # alter actions prohibited: drop column/tag, modify column/tag type, rename column/tag included in topic tdSql.error("alter table %s.%s drop column c1"%(parameterDict['dbName'], parameterDict['stbName'])) tdSql.error("alter table %s.%s drop column c2"%(parameterDict['dbName'], parameterDict['stbName'])) @@ -903,7 +903,7 @@ class TDTestCase: tdLog.info("cfgPath: %s" % cfgPath) self.tmqCase1(cfgPath, buildPath) - self.tmqCase2(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) self.tmqCase4(cfgPath, buildPath) self.tmqCase5(cfgPath, buildPath) diff --git a/tests/system-test/7-tmq/stbFilter.py b/tests/system-test/7-tmq/stbFilter.py index 7ad3cc99e78c56803f76f864fa718a18f32547f0..4942a39db458f0e307649903e7449295d8909cc3 100644 --- a/tests/system-test/7-tmq/stbFilter.py +++ b/tests/system-test/7-tmq/stbFilter.py @@ -79,27 +79,27 @@ class TDTestCase: topicNameList = ['topic1', 'topic2', 'topic3'] expectRowsList = [] tmqCom.initConsumerTable() - + 
tdLog.info("create topics from stb with filter") queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 4 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) queryString = "select ts, log(c1), cos(c1) from %s.%s where c1 > 5000" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[1], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) queryString = "select ts, log(c1), atan(c1) from %s.%s where ts >= %d" %(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9000) sqlString = "create topic %s as %s" %(topicNameList[2], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -115,10 +115,10 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") @@ -132,7 +132,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -148,14 +148,14 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[2] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) tdLog.exit("2 tmq consume rows error!") - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -193,7 +193,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) queryString = "select ts, sin(c1), pow(c2,3) from %s.%s where sin(c2) >= 0" %(paraDict['dbName'], paraDict['stbName']) @@ -209,7 +209,7 @@ class TDTestCase: tdSql.execute(sqlString) tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) - + # start tmq consume processor tdLog.info("insert consume info to consume processor") consumerId = 0 @@ -223,10 +223,10 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + 
tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") @@ -240,7 +240,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -256,14 +256,14 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[2] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0])) tdLog.exit("2 tmq consume rows error!") - # time.sleep(10) + # time.sleep(10) # for i in range(len(topicNameList)): # tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py index 003dd9a47d5698efafda6a112868d78854e8a6ef..6a26d2ce1f38774b2d63031c518883641c23f864 100644 --- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py +++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 1 self.rowsPerTbl = 10000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -65,11 +65,11 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return - + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'dbt', @@ -95,7 +95,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # update to half tables # paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", @@ -103,16 +103,16 @@ class TDTestCase: # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + 
tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_UpperCase_stb1' + topicFromStb1 = 'topic_UpperCase_stb1' # queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 0 @@ -139,18 +139,18 @@ class TDTestCase: tdLog.info("run select sql from db") tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQuery)) if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error!") - - tmqCom.checkFileContent(consumerId, queryString) + + tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... ") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -170,15 +170,15 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) - + # update to half tables # paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2) # paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -187,17 +187,17 @@ class TDTestCase: # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_UpperCase_stb1' + topicFromStb1 = 'topic_UpperCase_stb1' queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) # queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'shanghai' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 1 @@ -213,13 +213,13 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2) paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"], 
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -229,7 +229,7 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt)) if self.snapshot == 0: if totalConsumeRows != expectrowcnt: @@ -237,8 +237,8 @@ class TDTestCase: elif self.snapshot == 1: if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) @@ -251,14 +251,14 @@ class TDTestCase: tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.tmqCase1() self.tmqCase2() - + self.prepareTestEnv() tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 self.tmqCase1() self.tmqCase2() - + def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py index 1ea23fe376aa928c892c4bd49b85eddc83f7158d..9053bf26206f73950018f41e05e247fe9e36c181 100644 --- a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py +++ b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 100 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -65,11 +65,11 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return - + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'dbt', @@ -95,7 +95,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # update to half tables # paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", @@ -103,16 +103,16 @@ class TDTestCase: # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + # 
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_UpperCase_stb1' + topicFromStb1 = 'topic_UpperCase_stb1' queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) # queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 0 @@ -139,18 +139,18 @@ class TDTestCase: tdLog.info("run select sql from db") tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQuery)) if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... ") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -170,15 +170,15 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) - + # update to half tables # paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2) # paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -187,17 +187,17 @@ class TDTestCase: # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_UpperCase_stb1' + topicFromStb1 = 'topic_UpperCase_stb1' # queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) queryString = "select ts, c1, c2, t4 from %s.%s where t4 == 'beijing' or t4 == 'changsha'"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 1 @@ -213,7 +213,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 7/10) paraDict['ctbStartIdx'] = int(paraDict['ctbNum'] * 7/10) # paraDict["rowsPerTbl"] = 100 @@ -221,7 +221,7 @@ class TDTestCase: 
tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -231,7 +231,7 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt)) if self.snapshot == 0: if totalConsumeRows != expectrowcnt / 2: @@ -239,8 +239,8 @@ class TDTestCase: elif self.snapshot == 1: if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error when snapshot is 1!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) @@ -253,13 +253,13 @@ class TDTestCase: tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.tmqCase1() self.tmqCase2() - + self.prepareTestEnv() tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 self.tmqCase1() - self.tmqCase2() + self.tmqCase2() def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py index b2c569e31e3df633369af4331ec7b3b7cf392672..ba46f72695fb607ba14f48821db37cce1da1d441 100644 --- a/tests/system-test/7-tmq/subscribeDb.py +++ b/tests/system-test/7-tmq/subscribeDb.py @@ -49,7 +49,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -58,7 +58,7 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -73,11 +73,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -85,20 +85,20 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no 
--track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -111,8 +111,8 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - - event.set() + + event.set() tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) return @@ -141,7 +141,7 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - + def prepareEnv(self, **parameterDict): print ("input parameters:") print (parameterDict) @@ -159,7 +159,7 @@ class TDTestCase: parameterDict["ctbNum"],\ parameterDict["rowsPerTbl"],\ parameterDict["batchNum"],\ - parameterDict["startTs"]) + parameterDict["startTs"]) return def tmqCase1(self, cfgPath, buildPath): @@ -182,10 +182,10 @@ class TDTestCase: prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() - + tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -197,7 +197,7 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -208,14 +208,14 @@ class TDTestCase: # wait for data ready prepareEnvThread.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -226,7 +226,7 @@ class TDTestCase: self.initConsumerTable() tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -238,7 +238,7 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor") pollDelay = 20 showMsg = 1 @@ 
-250,7 +250,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -279,12 +279,12 @@ class TDTestCase: prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() - + tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - + consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -298,25 +298,25 @@ class TDTestCase: consumerId = 1 self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") pollDelay = 20 showMsg = 1 - showRow = 1 + showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) # wait for data ready prepareEnvThread.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 2 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) if not (totalConsumeRows >= expectrowcnt): tdLog.exit("tmq consume rows error!") @@ -343,12 +343,12 @@ class TDTestCase: tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups'])) tdSql.execute("create table if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict['dbName'], parameterDict['stbName'])) - + tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - + consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] topicList = topicName1 @@ -366,11 +366,11 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor") pollDelay = 100 showMsg = 1 - showRow = 1 + showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) @@ -378,14 +378,14 @@ class TDTestCase: # wait for data ready prepareEnvThread.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 2 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt * 2: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) tdLog.exit("tmq consume rows error!") @@ -430,9 +430,9 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - + consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] topicList = topicName1 @@ -443,10 +443,10 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, 
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + # consumerId = 1 # self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -456,16 +456,16 @@ class TDTestCase: self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) # wait for data ready - prepareEnvThread.join() + prepareEnvThread.join() prepareEnvThread2.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -486,10 +486,10 @@ class TDTestCase: tdLog.info("cfgPath: %s" % cfgPath) self.tmqCase1(cfgPath, buildPath) - self.tmqCase2(cfgPath, buildPath) - self.tmqCase2a(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) + self.tmqCase2a(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) - + def stop(self): tdSql.close() tdLog.success(f"{__file__} successfully executed") diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py index 4e8fb04517dc228329235994174c482d59868383..7720001fbb79353e8c81eea89aded209f1a8fc99 100644 --- a/tests/system-test/7-tmq/subscribeDb0.py +++ b/tests/system-test/7-tmq/subscribeDb0.py @@ -49,7 +49,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -58,7 +58,7 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -73,11 +73,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -85,20 +85,20 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s 
-g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -111,8 +111,8 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - - event.set() + + event.set() tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) return @@ -141,7 +141,7 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - + def prepareEnv(self, **parameterDict): print ("input parameters:") print (parameterDict) @@ -159,7 +159,7 @@ class TDTestCase: parameterDict["ctbNum"],\ parameterDict["rowsPerTbl"],\ parameterDict["batchNum"],\ - parameterDict["startTs"]) + parameterDict["startTs"]) return def tmqCase4(self, cfgPath, buildPath): @@ -198,9 +198,9 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - + consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] topicList = topicName1 @@ -211,10 +211,10 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + consumerId = 1 self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -224,16 +224,16 @@ class TDTestCase: self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) # wait for data ready - prepareEnvThread.join() + prepareEnvThread.join() prepareEnvThread2.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 2 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -275,9 +275,9 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) - + consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] topicList = topicName1 @@ -288,10 +288,10 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + consumerId = 1 
self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -304,16 +304,16 @@ class TDTestCase: prepareEnvThread2.start() # wait for data ready - prepareEnvThread.join() + prepareEnvThread.join() prepareEnvThread2.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 2 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows < expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py index 28a341f8f353cd16120420fa098ca46b1a458edb..404938158f55b5ff82958b73711054b5b9d5edc6 100644 --- a/tests/system-test/7-tmq/subscribeDb1.py +++ b/tests/system-test/7-tmq/subscribeDb1.py @@ -49,7 +49,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -58,7 +58,7 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -73,11 +73,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -85,20 +85,20 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" 
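# The second consumeinfo row below registers consumer 1 on the same topic
# list; the harness pairs each consumeinfo row with one consumeresult row, so
# the case later collects expectRows = 2 and checks the combined count. A
# minimal sketch of the two-consumer flow, assuming this class's helpers;
# variable names mirror the surrounding case:
for cid in (0, 1):
    self.insertConsumerInfo(cid, expectrowcnt, topicList, keyList,
                            ifcheckdata, ifManualCommit)
event.wait()                              # tables exist, safe to start tmq_sim
self.startTmqSimProcess(buildPath, cfgPath, pollDelay, parameterDict["dbName"])
resultList = self.selectConsumeResult(2)  # one result row per consumer
totalConsumeRows = sum(resultList)        # combined count checked against expectrowcnt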
tdLog.info(shellCmd) os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -111,8 +111,8 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - - event.set() + + event.set() tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) return @@ -141,7 +141,7 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - + def prepareEnv(self, **parameterDict): print ("input parameters:") print (parameterDict) @@ -159,7 +159,7 @@ class TDTestCase: parameterDict["ctbNum"],\ parameterDict["rowsPerTbl"],\ parameterDict["batchNum"],\ - parameterDict["startTs"]) + parameterDict["startTs"]) return def tmqCase6(self, cfgPath, buildPath): @@ -201,10 +201,10 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db60' topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) - + consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] topicList = topicName1 + ',' + topicName2 @@ -215,10 +215,10 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + #consumerId = 1 #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -228,16 +228,16 @@ class TDTestCase: self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) # wait for data ready - prepareEnvThread.join() + prepareEnvThread.join() prepareEnvThread2.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -286,10 +286,10 @@ class TDTestCase: tdLog.info("create topics from db") topicName1 = 'topic_db60' topicName2 = 'topic_db61' - - tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) + + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) tdSql.execute("create topic %s as database %s" %(topicName2, parameterDict2['dbName'])) - + consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"] topicList = topicName1 + ',' + topicName2 @@ -300,10 +300,10 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + consumerId = 1 self.insertConsumerInfo(consumerId, 
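# The thread handshake these cases rely on: create_tables() (below) calls
# event.set() once the database and child tables exist, each case blocks on
# event.wait() before launching tmq_sim, and prepareEnvThread.join() is taken
# only after the consumer is already polling, so inserts and consumption
# overlap. A self-contained sketch -- the module-scope
# `event = threading.Event()` is an assumption, inferred from how
# event.set()/event.wait() are used throughout this suite:
import threading

event = threading.Event()

def prepare_env():
    # ... create database / stable / child tables, then start inserting ...
    event.set()               # signal: schema exists; inserts keep running

t = threading.Thread(target=prepare_env)
t.start()
event.wait()                  # don't start the consumer before tables exist
# launch tmq_sim here, while the insert thread is still producing rows
t.join()                      # then wait for all data before checking counts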
expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -313,16 +313,16 @@ class TDTestCase: self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) # wait for data ready - prepareEnvThread.join() + prepareEnvThread.join() prepareEnvThread2.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 2 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") diff --git a/tests/system-test/7-tmq/subscribeDb2.py b/tests/system-test/7-tmq/subscribeDb2.py index 78aaa2634ca4cf24ed5a5c6790c4e6c786917c1b..4702aef035081abcf51d619fda2a26f666174d1b 100644 --- a/tests/system-test/7-tmq/subscribeDb2.py +++ b/tests/system-test/7-tmq/subscribeDb2.py @@ -50,7 +50,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -59,7 +59,7 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -73,12 +73,12 @@ class TDTestCase: if tdSql.getRows() == expectRows: break else: - time.sleep(5) + time.sleep(5) for i in range(expectRows): tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -89,17 +89,17 @@ class TDTestCase: if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists 
%s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -112,8 +112,8 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - - event.set() + + event.set() tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) return @@ -146,7 +146,7 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - + def prepareEnv(self, **parameterDict): print ("input parameters:") print (parameterDict) @@ -165,7 +165,7 @@ class TDTestCase: parameterDict["ctbNum"],\ parameterDict["rowsPerTbl"],\ parameterDict["batchNum"],\ - parameterDict["startTs"]) + parameterDict["startTs"]) return def tmqCase8(self, cfgPath, buildPath): @@ -188,10 +188,10 @@ class TDTestCase: prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() - + tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = math.ceil(parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2) @@ -203,7 +203,7 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -214,19 +214,19 @@ class TDTestCase: # wait for data ready prepareEnvThread.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if not (totalConsumeRows >= expectrowcnt): tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") - + tdLog.info("again start consume processer") self.initConsumerTable() expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -237,7 +237,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -266,10 +266,10 @@ class TDTestCase: prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() - + tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = math.ceil(parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] / 2) @@ -281,7 +281,7 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -292,14 +292,14 @@ class TDTestCase: # wait for data ready prepareEnvThread.join() - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + tdSql.query("select count(*) from %s.%s" %(parameterDict['dbName'], 
parameterDict['stbName'])) countOfStb = tdSql.getData(0,0) print ("====total rows of stb: %d"%countOfStb) @@ -307,7 +307,7 @@ class TDTestCase: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) if totalConsumeRows < expectrowcnt: tdLog.exit("tmq consume rows error!") - + tdLog.info("again start consume processer") self.initConsumerTable() expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -340,7 +340,7 @@ class TDTestCase: tdLog.info("cfgPath: %s" % cfgPath) self.tmqCase8(cfgPath, buildPath) - self.tmqCase9(cfgPath, buildPath) + self.tmqCase9(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeDb3.py b/tests/system-test/7-tmq/subscribeDb3.py index b576a0ea700c094c64b63a065056a00fda47e59e..e8e475456cfa56240e4c5df5654b5e68eca9324d 100644 --- a/tests/system-test/7-tmq/subscribeDb3.py +++ b/tests/system-test/7-tmq/subscribeDb3.py @@ -49,18 +49,18 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) - tdSql.query("drop table if exists %s.notifyinfo "%(cdbName)) + tdSql.query("drop table if exists %s.notifyinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -75,7 +75,7 @@ class TDTestCase: else: time.sleep(0.1) return - + def getStartCommitNotifyFromTmqsim(self,cdbName='cdb'): while 1: tdSql.query("select * from %s.notifyinfo"%cdbName) @@ -95,12 +95,12 @@ class TDTestCase: if tdSql.getRows() == expectRows: break else: - time.sleep(1) + time.sleep(1) for i in range(expectRows): tdLog.info ("ts: %s, consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 0), tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -111,17 +111,17 @@ class TDTestCase: if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d 
-d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl): - tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) + tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups)) tsql.execute("use %s" %dbName) tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName) pre_create = "create table" @@ -134,8 +134,8 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - - event.set() + + event.set() tdLog.debug("complete to create database[%s], stable[%s] and %d child tables" %(dbName, stbName, ctbNum)) return @@ -164,7 +164,7 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - + def prepareEnv(self, **parameterDict): print ("input parameters:") print (parameterDict) @@ -183,7 +183,7 @@ class TDTestCase: parameterDict["ctbNum"],\ parameterDict["rowsPerTbl"],\ parameterDict["batchNum"],\ - parameterDict["startTs"]) + parameterDict["startTs"]) return def tmqCase10(self, cfgPath, buildPath): @@ -206,10 +206,10 @@ class TDTestCase: prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() - + tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -221,7 +221,7 @@ class TDTestCase: auto.commit.interval.ms:6000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -242,18 +242,18 @@ class TDTestCase: resultList = self.selectConsumeResult(expectRows) # wait for data ready - prepareEnvThread.join() + prepareEnvThread.join() tdLog.info("insert process end, and start to check consume result") tdLog.info("again start consume processer") self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - + expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -283,10 +283,10 @@ class TDTestCase: prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict) prepareEnvThread.start() - + tdLog.info("create topics from db") topicName1 = 'topic_db1' - + tdSql.execute("create topic %s as database %s" %(topicName1, parameterDict['dbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -298,7 +298,7 @@ class TDTestCase: auto.commit.interval.ms:1000,\ auto.offset.reset:earliest' self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + event.wait() tdLog.info("start consume processor") @@ -307,7 +307,7 @@ class TDTestCase: showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - # time.sleep(6) + # time.sleep(6) tdLog.info("start to wait commit notify") self.getStartCommitNotifyFromTmqsim() @@ -320,18 +320,18 @@ 
class TDTestCase: # resultList = self.selectConsumeResult(expectRows) # wait for data ready - prepareEnvThread.join() + prepareEnvThread.join() tdLog.info("insert process end, and start to check consume result") tdLog.info("again start consume processer") self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - + expectRows = 1 resultList = self.selectConsumeResult(expectRows) totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows >= expectrowcnt or totalConsumeRows <= 0: tdLog.info("act consume rows: %d, expect consume rows between %d and 0"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") diff --git a/tests/system-test/7-tmq/subscribeDb4.py b/tests/system-test/7-tmq/subscribeDb4.py index 145cbbbbf59604a4b828d3290df0bcfdb129038c..63a59c0dc336b6b26a1e3ffc17806f65be1a013a 100644 --- a/tests/system-test/7-tmq/subscribeDb4.py +++ b/tests/system-test/7-tmq/subscribeDb4.py @@ -47,7 +47,7 @@ class TDTestCase: pollDelay = 20 showMsg = 1 - showRow = 1 + showRow = 1 hostname = socket.gethostname() @@ -59,7 +59,7 @@ class TDTestCase: def tmqCase12(self): tdLog.printNoPrefix("======== test case 12: ") tdLog.info("step 1: create database, stb, ctb and insert data") - + tmqCom.initConsumerTable(self.cdbName) tdCom.create_database(tdSql,self.paraDict["dbName"],self.paraDict["dropFlag"]) @@ -76,20 +76,20 @@ class TDTestCase: tmqCom.insert_data_2(tdSql,self.paraDict["dbName"],self.paraDict["ctbPrefix"],self.paraDict["ctbNum"],self.paraDict["rowsPerTbl"],self.paraDict["batchNum"],self.paraDict["startTs"],self.paraDict["ctbStartIdx"]) tdLog.info("create topics from db") - topicName1 = 'topic_%s'%(self.paraDict['dbName']) + topicName1 = 'topic_%s'%(self.paraDict['dbName']) tdSql.execute("create topic %s as database %s" %(topicName1, self.paraDict['dbName'])) - + topicList = topicName1 keyList = '%s,%s,%s,%s'%(self.groupId,self.autoCommit,self.autoCommitInterval,self.autoOffset) self.expectrowcnt = self.paraDict["rowsPerTbl"] * self.paraDict["ctbNum"] * 2 tmqCom.insertConsumerInfo(self.consumerId, self.expectrowcnt,topicList,keyList,self.ifcheckdata,self.ifManualCommit) - - tdLog.info("start consume processor") + + tdLog.info("start consume processor") tmqCom.startTmqSimProcess(self.pollDelay,self.paraDict["dbName"],self.showMsg, self.showRow,self.cdbName) tdLog.info("After waiting for a period of time, drop one stable") - time.sleep(3) - tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName'])) + time.sleep(3) + tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName'])) tdLog.info("wait result from consumer, then check it") expectRows = 1 @@ -98,7 +98,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if not (totalConsumeRows >= self.expectrowcnt/2 and totalConsumeRows <= self.expectrowcnt): tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, self.expectrowcnt/2, self.expectrowcnt)) tdLog.exit("tmq consume rows error!") diff --git a/tests/system-test/7-tmq/subscribeStb.py b/tests/system-test/7-tmq/subscribeStb.py index 4f70340b5a6df4daebcda5869117d9e0008d9a48..2757e590a3904dfcd584ae8e35c5e861dcbbb9db 100644 --- a/tests/system-test/7-tmq/subscribeStb.py +++ b/tests/system-test/7-tmq/subscribeStb.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): 
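
The initConsumerTable / insertConsumerInfo / selectConsumeResult helpers touched throughout these hunks form a small control channel between the test and the tmq_sim consumer: the test writes the consumer's parameters into cdb.consumeinfo, tmq_sim polls that table, runs the subscription, and appends its totals to cdb.consumeresult, which the test then polls in selectConsumeResult. Below is a minimal standalone sketch of the result-polling side, assuming any DB-API cursor connected to the same TDengine instance; the helper name, timeout, and return shape are illustrative only and not part of the patch:

import time

def wait_consume_result(cursor, expect_rows, cdb_name='cdb',
                        interval=5.0, timeout=600.0):
    """Poll <cdb>.consumeresult until expect_rows consumers have reported,
    then return each consumer's consumed-row count (column consumrowcnt)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        cursor.execute("select consumerid, consumrowcnt from %s.consumeresult"
                       % cdb_name)
        rows = cursor.fetchall()
        if len(rows) >= expect_rows:
            return [r[1] for r in rows]
        time.sleep(interval)           # same polling cadence the tests use
    raise TimeoutError("consume result not ready within %ds" % timeout)

The tests then sum the returned counts and compare the total against expectrowcnt, exiting with "tmq consume rows error!" on a mismatch, exactly as the surrounding hunks show.
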
tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -97,14 +97,14 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -134,7 +134,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s_%d values "%(stbName,i) for j in range(rowsPerTbl): @@ -168,8 +168,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ 
[OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -188,8 +188,8 @@ class TDTestCase: return def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - + tdLog.printNoPrefix("======== test case 1: ") + self.initConsumerTable() # create and start thread @@ -205,13 +205,13 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -245,7 +245,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -255,8 +255,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 1 end ...... ") def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - + tdLog.printNoPrefix("======== test case 2: ") + self.initConsumerTable() # create and start thread @@ -292,7 +292,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -341,7 +341,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -362,7 +362,7 @@ class TDTestCase: tdLog.info("cfgPath: %s" % cfgPath) self.tmqCase1(cfgPath, buildPath) - self.tmqCase2(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/subscribeStb0.py b/tests/system-test/7-tmq/subscribeStb0.py index 65eaab897deb41d6d46f040be89b7f338a719fb1..26e834ae2c4ce898ee57971a8eb0066d0ad24838 100644 --- a/tests/system-test/7-tmq/subscribeStb0.py +++ b/tests/system-test/7-tmq/subscribeStb0.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo 
table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -97,14 +97,14 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -134,7 +134,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s_%d values "%(stbName,i) for j in range(rowsPerTbl): @@ -168,8 +168,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ 
[OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -188,8 +188,8 @@ class TDTestCase: return def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - + tdLog.printNoPrefix("======== test case 3: ") + self.initConsumerTable() # create and start thread @@ -213,7 +213,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -231,7 +231,7 @@ class TDTestCase: showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - + time.sleep(1.5) tdLog.info("drop som child table of stb1") dropTblNum = 4 @@ -246,9 +246,9 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) - + tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, remaindrowcnt, expectrowcnt)) if not (totalConsumeRows <= expectrowcnt and totalConsumeRows >= remaindrowcnt): tdLog.exit("tmq consume rows error!") @@ -259,7 +259,7 @@ class TDTestCase: def tmqCase4(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 4: ") - + self.initConsumerTable() # create and start thread @@ -275,7 +275,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -288,7 +288,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -313,7 +313,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -331,7 +331,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -342,7 +342,7 @@ class TDTestCase: def tmqCase5(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 5: ") - + self.initConsumerTable() # create and start thread @@ -358,7 +358,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -371,7 +371,7 @@ class TDTestCase: tdLog.info("create topics from stb1") 
topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -396,7 +396,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -414,14 +414,14 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != (expectrowcnt * (1 + 1/4)): tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) - tdLog.printNoPrefix("======== test case 5 end ...... ") + tdLog.printNoPrefix("======== test case 5 end ...... ") def run(self): tdSql.prepare() diff --git a/tests/system-test/7-tmq/subscribeStb1.py b/tests/system-test/7-tmq/subscribeStb1.py index 90d77dba0d66b2929877f656c57e160172e11908..0c49636b158b9b3dc4c810bf4bfd10fac0cc4473 100644 --- a/tests/system-test/7-tmq/subscribeStb1.py +++ b/tests/system-test/7-tmq/subscribeStb1.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -97,14 +97,14 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v 
--workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -134,7 +134,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s_%d values "%(stbName,i) for j in range(rowsPerTbl): @@ -168,8 +168,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -189,7 +189,7 @@ class TDTestCase: def tmqCase6(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 6: ") - + self.initConsumerTable() # create and start thread @@ -205,7 +205,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -218,7 +218,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -243,7 +243,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -265,7 +265,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -276,7 +276,7 @@ class TDTestCase: def tmqCase7(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 7: ") - + self.initConsumerTable() # create and start thread @@ -292,7 +292,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -305,7 
+305,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -330,7 +330,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -348,7 +348,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") diff --git a/tests/system-test/7-tmq/subscribeStb2.py b/tests/system-test/7-tmq/subscribeStb2.py index 74caa139f147b088904ec542aeb85c54b60fa779..2cbb16ec0027930c7f02ae7fc50a69117fb77f9f 100644 --- a/tests/system-test/7-tmq/subscribeStb2.py +++ b/tests/system-test/7-tmq/subscribeStb2.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -97,14 +97,14 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += 
" -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -134,7 +134,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s_%d values "%(stbName,i) for j in range(rowsPerTbl): @@ -168,8 +168,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -188,8 +188,8 @@ class TDTestCase: return def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - + tdLog.printNoPrefix("======== test case 8: ") + self.initConsumerTable() # create and start thread @@ -218,7 +218,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -243,7 +243,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -263,7 +263,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -283,7 +283,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt*2: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) tdLog.exit("tmq consume rows error!") @@ -293,8 +293,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 8 end ...... 
") def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - + tdLog.printNoPrefix("======== test case 9: ") + self.initConsumerTable() # create and start thread @@ -323,7 +323,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -348,7 +348,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -372,7 +372,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -392,7 +392,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt*2: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) tdLog.exit("tmq consume rows error!") diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py index e6eaa175647b78029c4d94ebb1510782dc7d8d49..9c1b3fd241fffd1d3c09f914549291983a5f9505 100644 --- a/tests/system-test/7-tmq/subscribeStb3.py +++ b/tests/system-test/7-tmq/subscribeStb3.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def 
startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -97,14 +97,14 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -134,7 +134,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s_%d values "%(stbName,i) for j in range(rowsPerTbl): @@ -168,8 +168,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -188,8 +188,8 @@ class TDTestCase: return def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - + tdLog.printNoPrefix("======== test case 10: ") + self.initConsumerTable() # create and start thread @@ -218,7 +218,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -243,7 +243,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -267,7 +267,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt-10000: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) tdLog.exit("tmq consume rows error!") @@ -291,7 +291,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt*2: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) tdLog.exit("tmq consume rows error!") @@ -302,7 +302,7 @@ class TDTestCase: def tmqCase11(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 11: ") - + self.initConsumerTable() # create and start thread @@ -318,7 +318,7 @@ class TDTestCase: 'batchNum': 100, 
\ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -331,7 +331,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -356,7 +356,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -378,7 +378,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -389,7 +389,7 @@ class TDTestCase: def tmqCase12(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 12: ") - + self.initConsumerTable() # create and start thread @@ -405,7 +405,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -418,7 +418,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -443,7 +443,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -465,7 +465,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -476,7 +476,7 @@ class TDTestCase: def tmqCase13(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 13: ") - + self.initConsumerTable() # create and start thread @@ -492,7 +492,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -505,7 +505,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ 
-530,7 +530,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -553,7 +553,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt*(1/2+1/4): tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) tdLog.exit("tmq consume rows error!") @@ -576,7 +576,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -596,7 +596,7 @@ class TDTestCase: cfgPath = buildPath + "/../sim/psim/cfg" tdLog.info("cfgPath: %s" % cfgPath) - self.tmqCase10(cfgPath, buildPath) + self.tmqCase10(cfgPath, buildPath) self.tmqCase11(cfgPath, buildPath) self.tmqCase12(cfgPath, buildPath) self.tmqCase13(cfgPath, buildPath) diff --git a/tests/system-test/7-tmq/subscribeStb4.py b/tests/system-test/7-tmq/subscribeStb4.py index f3982c8f1f7e2dcdd28f98100ed3817013901b74..33f4c4af1a87bfef576fc0fb702d65732711ab95 100644 --- a/tests/system-test/7-tmq/subscribeStb4.py +++ b/tests/system-test/7-tmq/subscribeStb4.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -97,14 +97,14 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind 
--log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -134,7 +134,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s_%d values "%(stbName,i) for j in range(rowsPerTbl): @@ -168,8 +168,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -188,8 +188,8 @@ class TDTestCase: return def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - + tdLog.printNoPrefix("======== test case 1: ") + self.initConsumerTable() auotCtbNum = 5 @@ -208,7 +208,7 @@ class TDTestCase: 'batchNum': 100, 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -216,7 +216,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * (auotCtbNum + parameterDict["ctbNum"]) @@ -248,7 +248,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -258,8 +258,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 1 end ...... 
") def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - + tdLog.printNoPrefix("======== test case 2: ") + self.initConsumerTable() auotCtbNum = 10 @@ -278,7 +278,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -322,7 +322,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -343,7 +343,7 @@ class TDTestCase: tdLog.info("cfgPath: %s" % cfgPath) self.tmqCase1(cfgPath, buildPath) - self.tmqCase2(cfgPath, buildPath) + self.tmqCase2(cfgPath, buildPath) def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/tmq3mnodeSwitch.py b/tests/system-test/7-tmq/tmq3mnodeSwitch.py index 2769c3867bbf152269b55fe57d183d97cfd7d58a..305a93128e2a15a53bbc46e2a53bfef8eb41e2c3 100644 --- a/tests/system-test/7-tmq/tmq3mnodeSwitch.py +++ b/tests/system-test/7-tmq/tmq3mnodeSwitch.py @@ -36,7 +36,7 @@ class TDTestCase: tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - + def checkDnodesStatusAndCreateMnode(self,dnodeNumbers): count=0 while count < dnodeNumbers: @@ -44,7 +44,7 @@ class TDTestCase: # tdLog.debug(tdSql.queryResult) dCnt = 0 for i in range(dnodeNumbers): - if tdSql.queryResult[i][self.dnodeStatusIndex] != "ready": + if tdSql.queryResult[i][self.dnodeStatusIndex] != "ready": break else: dCnt += 1 @@ -64,7 +64,7 @@ class TDTestCase: while count < self.mnodeCheckCnt: time.sleep(1) tdSql.query("show mnodes;") - if tdSql.checkRows(self.mnodes) : + if tdSql.checkRows(self.mnodes) : tdLog.debug("mnode is three nodes") else: tdLog.exit("mnode number is correct") @@ -78,17 +78,17 @@ class TDTestCase: break elif roleOfMnode0=='follower' and roleOfMnode1=='leader' and roleOfMnode2 == 'follower' : self.dnodeOfLeader = tdSql.queryResult[1][self.idIndex] - break + break elif roleOfMnode0=='follower' and roleOfMnode1=='follower' and roleOfMnode2 == 'leader' : self.dnodeOfLeader = tdSql.queryResult[2][self.idIndex] - break - else: + break + else: count+=1 else: tdLog.exit("three mnodes is not ready in 10s ") - tdSql.query("show mnodes;") - tdSql.checkRows(self.mnodes) + tdSql.query("show mnodes;") + tdSql.checkRows(self.mnodes) tdSql.checkData(0,self.mnodeEpIndex,'%s:%d'%(self.host,self.startPort)) tdSql.checkData(0,self.mnodeStatusIndex,'ready') tdSql.checkData(1,self.mnodeEpIndex,'%s:%d'%(self.host,self.startPort+self.portStep)) @@ -101,8 +101,8 @@ class TDTestCase: while count < self.mnodeCheckCnt: time.sleep(1) tdSql.query("show mnodes") - tdLog.debug(tdSql.queryResult) - # if tdSql.checkRows(self.mnodes) : + tdLog.debug(tdSql.queryResult) + # if tdSql.checkRows(self.mnodes) : # tdLog.debug("mnode is three nodes") # else: # tdLog.exit("mnode number is correct") @@ -117,21 +117,21 @@ class TDTestCase: break elif roleOfMnode1=='follower' and roleOfMnode2 == 'leader' : self.dnodeOfLeader = tdSql.queryResult[2][self.idIndex] - break + break elif roleOfMnode1=='offline' : if roleOfMnode0=='leader' and roleOfMnode2 == 'follower' : self.dnodeOfLeader = 
tdSql.queryResult[0][self.idIndex] break elif roleOfMnode0=='follower' and roleOfMnode2 == 'leader' : self.dnodeOfLeader = tdSql.queryResult[2][self.idIndex] - break + break elif roleOfMnode2=='offline' : if roleOfMnode0=='leader' and roleOfMnode1 == 'follower' : self.dnodeOfLeader = tdSql.queryResult[0][self.idIndex] break elif roleOfMnode0=='follower' and roleOfMnode1 == 'leader' : self.dnodeOfLeader = tdSql.queryResult[1][self.idIndex] - break + break count+=1 else: @@ -144,27 +144,27 @@ class TDTestCase: cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() while True: dst = queryFile.readline() src = consumeFile.readline() - + if dst: if dst != src: tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) else: break - return - + return + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'db1', @@ -195,7 +195,7 @@ class TDTestCase: tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) tdLog.info("async insert data") pThread = tmqCom.asyncInsertData(paraDict) - + tdLog.info("create topics from stb with filter") queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) @@ -234,22 +234,22 @@ class TDTestCase: tdDnodes[1].stoptaosd() time.sleep(10) self.check3mnode1off() - - tdLog.info("switch end and wait insert data end ................") - pThread.join() - tdLog.info("check the consume result") - tdSql.query(queryString) + tdLog.info("switch end and wait insert data end ................") + pThread.join() + + tdLog.info("check the consume result") + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + self.checkFileContent(consumerId, queryString) time.sleep(10) for i in range(len(topicNameList)): diff --git a/tests/system-test/7-tmq/tmqAlterSchema.py b/tests/system-test/7-tmq/tmqAlterSchema.py index a2e20990d9ca43fe62db04c6072a805356f5d514..232a1e11faaf0474ebd12cb8fa83e15bc94bf6da 100644 --- a/tests/system-test/7-tmq/tmqAlterSchema.py +++ b/tests/system-test/7-tmq/tmqAlterSchema.py @@ -36,7 +36,7 @@ class TDTestCase: tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: topic: select * from stb, while consume, add column int-A/bianry-B/float-C, and then modify B, drop C") tdLog.printNoPrefix("add tag int-A/bianry-B/float-C, and then rename A, modify B, drop C, set t2") @@ -61,7 +61,7 @@ class TDTestCase: topicNameList = ['topic1'] expectRowsList = [] - queryStringList = [] + queryStringList = [] tmqCom.initConsumerTable() tdCom.create_database(tdSql, 
paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) tdLog.info("create stb") @@ -71,15 +71,15 @@ class TDTestCase: # tdLog.info("async insert data") # pThread = tmqCom.asyncInsertData(paraDict) tmqCom.insert_data_2(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"],paraDict["ctbStartIdx"]) - + tdLog.info("create topics from stb with filter") queryStringList.append("select * from %s.%s" %(paraDict['dbName'], paraDict['stbName'])) sqlString = "create topic %s as %s" %(topicNameList[0], queryStringList[0]) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryStringList[0]) - expectRowsList.append(tdSql.getRows()) - + tdSql.query(queryStringList[0]) + expectRowsList.append(tdSql.getRows()) + # init consume info, and start tmq_sim, then check consume result tdLog.info("insert consume info to consume processor") consumerId = 0 @@ -91,14 +91,14 @@ class TDTestCase: tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) dstFile = tmqCom.getResultFileByTaosShell(consumerId, queryStringList[0]) - + tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) tdLog.info("wait the notify info of start consume, then alter schema") tmqCom.getStartConsumeNotifyFromTmqsim() - - # add column double-A/bianry-B/double-C, and then modify B, drop C + + # add column double-A/bianry-B/double-C, and then modify B, drop C sqlString = "alter table %s.%s add column newc1 double"%(paraDict["dbName"],paraDict['stbName']) tdSql.execute(sqlString) sqlString = "alter table %s.%s add column newc2 binary(16)"%(paraDict["dbName"],paraDict['stbName']) @@ -108,8 +108,8 @@ class TDTestCase: sqlString = "alter table %s.%s modify column newc2 binary(32)"%(paraDict["dbName"],paraDict['stbName']) tdSql.execute(sqlString) sqlString = "alter table %s.%s drop column newc3"%(paraDict["dbName"],paraDict['stbName']) - tdSql.execute(sqlString) - # add tag double-A/bianry-B/double-C, and then rename A, modify B, drop C, set t1 + tdSql.execute(sqlString) + # add tag double-A/bianry-B/double-C, and then rename A, modify B, drop C, set t1 sqlString = "alter table %s.%s add tag newt1 double"%(paraDict["dbName"],paraDict['stbName']) tdSql.execute(sqlString) sqlString = "alter table %s.%s add tag newt2 binary(16)"%(paraDict["dbName"],paraDict['stbName']) @@ -125,27 +125,27 @@ class TDTestCase: sqlString = "alter table %s.%s0 set tag newt2='new tag'"%(paraDict["dbName"],paraDict['ctbPrefix']) tdSql.execute(sqlString) - tdLog.info("check the consume result") - tdSql.query(queryStringList[0]) + tdLog.info("check the consume result") + tdSql.query(queryStringList[0]) expectRowsList.append(tdSql.getRows()) expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + tdLog.info("expect consume rows: %d"%(expectRowsList[0])) tdLog.info("act consume rows: %d"%(resultList[0])) - + if expectRowsList[0] != resultList[0]: tdLog.exit("0 tmq consume rows error!") tmqCom.checkTmqConsumeFileContent(consumerId, dstFile) - + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) tdLog.printNoPrefix("======== test case 1 end ...... 
") - + def tmqCase2(self): tdLog.printNoPrefix("======== test case 2: topic: select * from ntb, while consume, add column int-A/bianry-B/float-C, and then rename A, modify B, drop C") paraDict = {'dbName': 'db1', @@ -166,12 +166,12 @@ class TDTestCase: 'pollDelay': 10, 'showMsg': 1, 'showRow': 1} - - ntbName = 'ntb' + + ntbName = 'ntb' topicNameList = ['topic1'] expectRowsList = [] - queryStringList = [] + queryStringList = [] tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1) tdLog.info("create stb") @@ -182,15 +182,15 @@ class TDTestCase: # pThread = tmqCom.asyncInsertData(paraDict) tdCom.insert_rows(tdSql, dbname=paraDict["dbName"], tbname=ntbName, column_ele_list=paraDict['colSchema'], start_ts_value=paraDict["startTs"], count=paraDict["rowsPerTbl"]) tdLog.info("insert data end") - + tdLog.info("create topics from ntb with filter") queryStringList.append("select * from %s.%s" %(paraDict['dbName'], ntbName)) sqlString = "create topic %s as %s" %(topicNameList[0], queryStringList[0]) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryStringList[0]) - expectRowsList.append(tdSql.getRows()) - + tdSql.query(queryStringList[0]) + expectRowsList.append(tdSql.getRows()) + # init consume info, and start tmq_sim, then check consume result tdLog.info("insert consume info to consume processor") consumerId = 0 @@ -202,13 +202,13 @@ class TDTestCase: tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) dstFile = tmqCom.getResultFileByTaosShell(consumerId, queryStringList[0]) - + tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) tdLog.info("wait the notify info of start consume, then alter schema") tmqCom.getStartConsumeNotifyFromTmqsim() - + # add column double-A/bianry-B/double-C, and then rename A, modify B, drop C sqlString = "alter table %s.%s add column newc1 double"%(paraDict["dbName"],ntbName) tdSql.execute(sqlString) @@ -223,21 +223,21 @@ class TDTestCase: sqlString = "alter table %s.%s drop column newc3"%(paraDict["dbName"],ntbName) tdSql.execute(sqlString) - tdLog.info("check the consume result") - tdSql.query(queryStringList[0]) + tdLog.info("check the consume result") + tdSql.query(queryStringList[0]) expectRowsList.append(tdSql.getRows()) expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + tdLog.info("expect consume rows: %d"%(expectRowsList[0])) tdLog.info("act consume rows: %d"%(resultList[0])) - + if expectRowsList[0] != resultList[0]: tdLog.exit("0 tmq consume rows error!") tmqCom.checkTmqConsumeFileContent(consumerId, dstFile) - + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqAutoCreateTbl.py b/tests/system-test/7-tmq/tmqAutoCreateTbl.py index 277fdf7afb226a49de00312b3240c70106ae4a4d..a613f11267ef769ef9162543a5818a0881f23abd 100644 --- a/tests/system-test/7-tmq/tmqAutoCreateTbl.py +++ b/tests/system-test/7-tmq/tmqAutoCreateTbl.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 500 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, 
paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,10 +62,10 @@ class TDTestCase: # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctbx",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"]) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return @@ -96,7 +96,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) # tdLog.info("create stb") @@ -105,12 +105,12 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"]) tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + consumerId = 0 expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] topicList = topicFromStb1 @@ -134,9 +134,9 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery)) - if totalConsumeRows != totalRowsFromQuery: + if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) @@ -144,7 +144,7 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 1 end ...... 
") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -169,7 +169,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) # tdLog.info("create stb") @@ -182,13 +182,13 @@ class TDTestCase: # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' # queryString = "select ts, c1, c2 from %s.%s "%(paraDict['dbName'], paraDict['stbName']) queryString = "select ts, c1, c2 from %s.%s where t4 == 'shanghai' and t5 == 'shanghai' "%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + consumerId = 1 expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 topicList = topicFromStb1 @@ -205,7 +205,7 @@ class TDTestCase: tdLog.info("create some new child table and insert data ") tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctby",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"]) - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -215,9 +215,9 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery)) - if totalConsumeRows != totalRowsFromQuery: + if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error!") tdSql.query("drop topic %s"%topicFromStb1) @@ -231,14 +231,14 @@ class TDTestCase: tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.tmqCase1() self.tmqCase2() - + self.prepareTestEnv() tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 self.tmqCase1() self.tmqCase2() - + def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/tmqCheckData.py b/tests/system-test/7-tmq/tmqCheckData.py index 0e55dfa19d33a14703b9d985446bf532537b4925..9338debfa6f5f9161cfa700dd2c3fdbdaa2fa8b7 100644 --- a/tests/system-test/7-tmq/tmqCheckData.py +++ b/tests/system-test/7-tmq/tmqCheckData.py @@ -27,26 +27,26 @@ class TDTestCase: cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() while True: dst = queryFile.readline() src = consumeFile.readline() - + if dst: if dst != src: tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) else: break - return + return def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") @@ -78,13 +78,13 @@ class TDTestCase: tdCom.create_ctable(tdSql, 
dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) tdLog.info("insert data") tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -100,15 +100,15 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + self.checkFileContent(consumerId, queryString) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() @@ -117,7 +117,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[1], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 1 @@ -127,7 +127,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -143,8 +143,8 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[2], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) - expectRowsList.append(tdSql.getRows()) + tdSql.query(queryString) + expectRowsList.append(tdSql.getRows()) consumerId = 2 topicList = topicNameList[2] @@ -153,7 +153,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) # if expectRowsList[2] != resultList[0]: @@ -162,7 +162,7 @@ class TDTestCase: # self.checkFileContent(consumerId, queryString) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqCheckData1.py b/tests/system-test/7-tmq/tmqCheckData1.py index 6cf849d1b989a9b99ea19a7fa77b9483254bd7f8..7c236bbe8bbfb6f0a1c66f3c7e1340081304a9f5 100644 --- a/tests/system-test/7-tmq/tmqCheckData1.py +++ b/tests/system-test/7-tmq/tmqCheckData1.py @@ -27,26 +27,26 @@ class TDTestCase: cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) 
os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() while True: dst = queryFile.readline() src = consumeFile.readline() - + if dst: if dst != src: tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) else: break - return + return def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") @@ -78,13 +78,13 @@ class TDTestCase: tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) tdLog.info("insert data") tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") queryString = "select ts,c1,c2 from %s.%s" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as stable %s.%s" %(topicNameList[0], paraDict["dbName"],paraDict["stbName"]) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -100,15 +100,15 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") - self.checkFileContent(consumerId, queryString) + self.checkFileContent(consumerId, queryString) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() @@ -116,7 +116,7 @@ class TDTestCase: sqlString = "create topic %s as database %s" %(topicNameList[1], paraDict['dbName']) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 1 @@ -126,7 +126,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -141,7 +141,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[2], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 2 @@ -151,7 +151,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[2] != resultList[0]: @@ -160,7 +160,7 @@ class TDTestCase: self.checkFileContent(consumerId, 
queryString) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py index 20cffed486a5ad592c92a4b1e259170764f46c03..b1455ebe48d03e314a5421fbb8fa3bd4d41ab9d9 100644 --- a/tests/system-test/7-tmq/tmqCommon.py +++ b/tests/system-test/7-tmq/tmqCommon.py @@ -41,23 +41,23 @@ class TMQCom: tdSql.init(conn.cursor()) # tdSql.init(conn.cursor(), logSql) # output sql.txt file - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("drop table if exists %s.consumeresult "%(cdbName)) - tdSql.query("drop table if exists %s.notifyinfo "%(cdbName)) + tdSql.query("drop table if exists %s.notifyinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) tdSql.query("create table %s.notifyinfo (ts timestamp, cmdid int, consumerid int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -72,13 +72,13 @@ class TMQCom: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList - + def selectConsumeMsgResult(self,expectRows,cdbName='cdb'): resultList=[] while 1: @@ -88,11 +88,11 @@ class TMQCom: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 2)) - + return resultList def startTmqSimProcess(self,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0,alias=0,snapshot=0): @@ -102,7 +102,7 @@ class TMQCom: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): processorName = buildPath + '\\build\\bin\\tmq_sim.exe' if alias != 0: @@ -111,8 +111,8 @@ class TMQCom: os.system(shellCmd) processorName = processorNameNew shellCmd = 'mintty -h 
never ' + processorName + ' -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s -e %d "%(pollDelay, dbName, showMsg, showRow, cdbName, snapshot) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s -e %d "%(pollDelay, dbName, showMsg, showRow, cdbName, snapshot) + shellCmd += "> nul 2>&1 &" else: processorName = buildPath + '/build/bin/tmq_sim' if alias != 0: @@ -121,10 +121,10 @@ class TMQCom: os.system(shellCmd) processorName = processorNameNew shellCmd = 'nohup ' + processorName + ' -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s -e %d "%(pollDelay, dbName, showMsg, showRow, cdbName, snapshot) + shellCmd += " -y %d -d %s -g %d -r %d -w %s -e %d "%(pollDelay, dbName, showMsg, showRow, cdbName, snapshot) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) - os.system(shellCmd) + os.system(shellCmd) def stopTmqSimProcess(self, processorName): psCmd = "ps -ef|grep -w %s|grep -v grep | awk '{print $2}'"%(processorName) @@ -149,7 +149,7 @@ class TMQCom: for i in range(actRows): if tdSql.getData(i, 1) == 0: loopFlag = 0 - break + break time.sleep(0.1) return @@ -163,7 +163,7 @@ class TMQCom: for i in range(actRows): if tdSql.getData(i, 1) == 1: loopFlag = 0 - break + break time.sleep(0.1) return @@ -196,7 +196,7 @@ class TMQCom: tagBinaryValue = 'shanghai' elif (i % 3 == 0): tagBinaryValue = 'changsha' - + sql += " %s.%s%d using %s.%s tags(%d, %d, %d, '%s', '%s')"%(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,tagBinaryValue,tagBinaryValue) tblBatched += 1 if (i == ctbNum-1 ) or (tblBatched == batchNum): @@ -206,9 +206,9 @@ class TMQCom: if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName)) - return + return def drop_ctable(self, tsql, dbname=None, count=1, default_ctbname_prefix="ctb",ctbStartIdx=0): for _ in range(count): @@ -246,7 +246,7 @@ class TMQCom: #print("insert sql:%s"%sql) tsql.execute(sql) tdLog.debug("insert data ............ 
[OK]") - return + return # schema: (ts timestamp, c1 int, c2 int, c3 binary(16)) def insert_data_1(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs): @@ -373,16 +373,16 @@ class TMQCom: if startTs == 0: t = time.time() startTs = int(round(t * 1000)) - + #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsBatched = 0 + rowsBatched = 0 for i in range(ctbNum): tagBinaryValue = 'beijing' if (i % 2 == 0): tagBinaryValue = 'shanghai' elif (i % 3 == 0): tagBinaryValue = 'changsha' - + sql += " %s.%s%d using %s.%s tags (%d, %d, %d, '%s', '%s') values "%(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,tagBinaryValue,tagBinaryValue) for j in range(rowsPerTbl): sql += "(%d, %d, %d, %d, 'binary_%d', 'nchar_%d', now) "%(startTs+j, j,j, j,i+ctbStartIdx,rowsBatched) @@ -413,7 +413,7 @@ class TMQCom: for i in range(ctbNum): tbName = '%s%s'%(ctbPrefix,i) tdCom.insert_rows(tsql,dbname=paraDict["dbName"],tbname=tbName,start_ts_value=paraDict['startTs'],count=paraDict['rowsPerTbl']) - return + return def threadFunction(self, **paraDict): # create new connector for new tdSql instance in my thread @@ -447,20 +447,20 @@ class TMQCom: cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() - + # skip offset for consumer for i in range(0,skipRowsOfCons): - consumeFile.readline() - + consumeFile.readline() + lines = 0 while True: dst = queryFile.readline() @@ -473,7 +473,7 @@ class TMQCom: tdLog.exit("consumerId %d consume rows[%d] is not match the rows by direct query"%(consumerId, lines)) else: break - return + return def getResultFileByTaosShell(self, consumerId, queryString): buildPath = tdCom.getBuildPath() @@ -483,15 +483,15 @@ class TMQCom: tdLog.info(cmdStr) os.system(cmdStr) return dstFile - - def checkTmqConsumeFileContent(self, consumerId, dstFile): - cfgPath = tdCom.getClientCfgPath() + + def checkTmqConsumeFileContent(self, consumerId, dstFile): + cfgPath = tdCom.getClientCfgPath() consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() lines = 0 @@ -506,7 +506,7 @@ class TMQCom: tdLog.exit("consumerId %d consume rows[%d] is not match the rows by direct query"%(consumerId, lines)) else: break - return + return def create_ntable(self, tsql, dbname=None, tbname_prefix="ntb", tbname_index_start_num = 1, column_elm_list=None, colPrefix='c', tblNum=1, **kwargs): tb_params = "" @@ -538,7 +538,7 @@ class TMQCom: column_value_str = column_value_str.rstrip()[:-1] insert_sql = f'insert into {dbname}.{tbname_prefix}{tblIdx+tbname_index_start_num} values ({column_value_str});' tsql.execute(insert_sql) - + def waitSubscriptionExit(self, tsql, topicName): wait_cnt = 0 while True: @@ -548,7 +548,7 @@ class TMQCom: for idx in range (rows): if tsql.getData(idx, 0) != topicName: continue - + if tsql.getData(idx, 3) == None: continue else: @@ -556,10 +556,10 @@ class TMQCom: wait_cnt += 1 exit_flag = 0 break - + if exit_flag == 1: break - + tsql.query("show subscriptions") 
tdLog.info("show subscriptions:") tdLog.info(tsql.queryResult) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py index d22d183e863d26a0c0498e37861b2f526fe4262d..20b0c65c714cda7709ba80d6289c628d0de2922e 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 1 self.ctbNum = 1 self.rowsPerTbl = 100000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -94,18 +94,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -120,18 +120,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("%d tmq consume rows error!"%consumerId) - tmqCom.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -162,11 +162,11 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) # queryString = "select 
* from %s.%s"%(paraDict['dbName'], paraDict['stbName']) @@ -174,7 +174,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -190,60 +190,60 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) firstConsumeRows = resultList[0] - + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) - tdLog.exit("%d tmq consume rows error!"%consumerId) + tdLog.exit("%d tmq consume rows error!"%consumerId) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 2 expectrowcnt = math.ceil(totalRowsInserted/3) tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 - resultList = tmqCom.selectConsumeResult(expectRows) + resultList = tmqCom.selectConsumeResult(expectRows) secondConsumeRows = resultList[0] - + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - + # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 3 expectrowcnt = math.ceil(totalRowsInserted/3) tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 - resultList = tmqCom.selectConsumeResult(expectRows) + resultList = tmqCom.selectConsumeResult(expectRows) thirdConsumeRows = resultList[0] - + if not (totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - - # total consume + + # total consume actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows - + if not (totalRowsInserted == actConsumeTotalRows): tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py 
b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py index 951a747069a2ad279dec56fd64781609ec2aa0f4..494952ecd5538b47d2bf59167cb6f4d934049f13 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 1 self.ctbNum = 1 self.rowsPerTbl = 100000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -94,18 +94,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -120,18 +120,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("%d tmq consume rows error!"%consumerId) - tmqCom.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -162,18 +162,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -189,60 +189,60 @@ class TDTestCase: tdLog.info("start consume processor 0") 
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) firstConsumeRows = resultList[0] - + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) - tdLog.exit("%d tmq consume rows error!"%consumerId) + tdLog.exit("%d tmq consume rows error!"%consumerId) # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 2 expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3) tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 - resultList = tmqCom.selectConsumeResult(expectRows) + resultList = tmqCom.selectConsumeResult(expectRows) secondConsumeRows = resultList[0] - + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - + # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 3 expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 1/3) tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 - resultList = tmqCom.selectConsumeResult(expectRows) + resultList = tmqCom.selectConsumeResult(expectRows) thirdConsumeRows = resultList[0] - + if not (totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - - # total consume + + # total consume actConsumeTotalRows = firstConsumeRows + secondConsumeRows + thirdConsumeRows - + if not (totalRowsInserted == actConsumeTotalRows): tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py index 6ee089af4e1fd53d4de7490db94f9847cb1d7aee..666ca6ca646937fb29ea9ec6346ebd97e5351dae 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py @@ -20,7 +20,7 @@ class TDTestCase: 
self.vgroups = 4 self.ctbNum = 3000 self.rowsPerTbl = 150 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.starttaosd(1) @@ -94,11 +94,11 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) @@ -106,7 +106,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -121,18 +121,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("%d tmq consume rows error!"%consumerId) - # tmqCom.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -163,11 +163,11 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) @@ -175,7 +175,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] 
tdLog.info("select result rows: %d"%totalRowsInserted) @@ -192,15 +192,15 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - + firstConsumeRows = resultList[0] # reinit consume info, and start tmq_sim, then check consume result @@ -208,22 +208,22 @@ class TDTestCase: consumerId = 2 expectrowcnt = math.ceil(totalRowsInserted*2/3) tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = firstConsumeRows + resultList[0] - + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): - tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py index 882989bfb68a7f3a77ca8eaefb74138cf1283dae..e4ce3b0f773218a78c411d1aae31aca8da11c350 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 3000 self.rowsPerTbl = 70 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # # tdDnodes.start(1) @@ -95,18 +95,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + 
tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -121,18 +121,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("%d tmq consume rows error!"%consumerId) - # tmqCom.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -163,18 +163,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -190,15 +190,15 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - + firstConsumeRows = resultList[0] # reinit consume info, and start tmq_sim, then check consume result @@ -206,22 +206,22 @@ class TDTestCase: consumerId = 2 expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = firstConsumeRows + resultList[0] - + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): - 
tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py index c334ff752bf7ec5753fc68f8d627b73ee86a6482..da7d9e4651325c45ef071285bea03a4b4984c84c 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 10 self.rowsPerTbl = 10000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # # tdDnodes.start(1) @@ -95,18 +95,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -121,18 +121,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("%d tmq consume rows error!"%consumerId) - # tmqCom.checkFileContent(consumerId, queryString) + # tmqCom.checkFileContent(consumerId, queryString) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -163,18 +163,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from 
stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -190,15 +190,15 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - + firstConsumeRows = resultList[0] # reinit consume info, and start tmq_sim, then check consume result @@ -206,22 +206,22 @@ class TDTestCase: consumerId = 2 expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = firstConsumeRows + resultList[0] - + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): - tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb.py b/tests/system-test/7-tmq/tmqConsFromTsdb.py index a4a242365aa6ddb8c0040cbde7d8adfb199177ad..b3bb5f84e43f8257eeb60e16a4f149e28d0a1496 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 1 self.ctbNum = 10 self.rowsPerTbl = 10000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + 
tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -94,18 +94,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -120,18 +120,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("%d tmq consume rows error!"%consumerId) - tmqCom.checkFileContent(consumerId, queryString) + tmqCom.checkFileContent(consumerId, queryString) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -162,18 +162,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -189,15 +189,15 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if not (expectrowcnt <= resultList[0] and totalRowsInserted >= resultList[0]): tdLog.info("act consume rows: %d, expect consume rows between %d and %d"%(resultList[0], expectrowcnt, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - + firstConsumeRows = resultList[0] # reinit consume info, and start tmq_sim, then check consume result @@ -205,22 +205,22 @@ class TDTestCase: consumerId = 2 expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2/3) tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], 
showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = firstConsumeRows + resultList[0] - + if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows): - tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) + tdLog.info("act consume rows, first: %d, second: %d "%(firstConsumeRows, resultList[0])) tdLog.info("and sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py index ec11b3286c7a100fa3a5a552f877c11a7ca13b34..07fb9c77515411f43035374ba8c46972777788cd 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 1 self.ctbNum = 1 self.rowsPerTbl = 1000000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -93,11 +93,11 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) @@ -105,7 +105,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -125,17 +125,17 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 2 resultList = tmqCom.selectConsumeResult(expectRows) actConsumeTotalRows = resultList[0] + resultList[1] - + if not (totalRowsInserted == actConsumeTotalRows): tdLog.info("sum of two consume 
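The second test case in these files splits consumption across two consumers; a condensed sketch of that check is below. It is a fragment, not a standalone script: resultList, paraDict, topicList, keyList, ifcheckdata, ifManualCommit and totalRowsInserted carry over from the preceding steps of the test, exactly as in the hunks.

# Sketch of case 2: consumer 1 drains part of the topic, consumer 2 resumes
# from the committed offset, and the two counts must sum to the query total.
import math

firstConsumeRows = resultList[0]   # rows taken by the first consumer

consumerId = 2
expectrowcnt = math.ceil(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 / 3)
tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"],
                          showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],
                          snapshot=paraDict['snapshot'])
resultList = tmqCom.selectConsumeResult(1)

actConsumeTotalRows = firstConsumeRows + resultList[0]
if not (expectrowcnt >= resultList[0] and totalRowsInserted == actConsumeTotalRows):
    tdLog.info("act consume rows, first: %d, second: %d" % (firstConsumeRows, resultList[0]))
    tdLog.exit("%d tmq consume rows error!" % consumerId)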
rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -166,11 +166,11 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+9379) # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) @@ -178,7 +178,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -194,35 +194,35 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("wait commit notify") tmqCom.getStartCommitNotifyFromTmqsim() tdLog.info("pkill consume processor") tdCom.killProcessor("tmq_sim") - + # time.sleep(10) - + # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 6 tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) tdLog.info("wait the consume result") - + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = resultList[0] - + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): - tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py index 4878652593d673aeabba66fe6dd2f931ea7156b5..ecdb0a43589227fd3bcad51ba3e2292618b86bef 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 1 self.ctbNum = 1 self.rowsPerTbl = 100000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: 
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -93,18 +93,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -124,17 +124,17 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 2 resultList = tmqCom.selectConsumeResult(expectRows) actConsumeTotalRows = resultList[0] + resultList[1] - + if not (totalRowsInserted == actConsumeTotalRows): tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -165,18 +165,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -192,35 +192,35 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("wait commit notify") tmqCom.getStartCommitNotifyFromTmqsim() tdLog.info("pkill consume processor") tdCom.killProcessor("tmq_sim") - + # time.sleep(10) - + # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 6 tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) tdLog.info("wait the consume result") - + expectRows = 1 resultList = 
tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = resultList[0] - + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): - tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py index 451dc43343409e7feeb6db5705cdf4a4151caa6d..05f7030169490c1e9bf2dbe321efee4cbe0bd7d0 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 4000 self.rowsPerTbl = 150 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -93,11 +93,11 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 == 0) and (ts >= %d) and (t4 like 'shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/5)) # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) @@ -105,7 +105,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] tdLog.info("select result rows: %d"%totalRowsInserted) @@ -126,12 +126,12 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 2 resultList = tmqCom.selectConsumeResult(expectRows) actConsumeTotalRows = resultList[0] + resultList[1] - + if not (totalRowsInserted == actConsumeTotalRows): tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) @@ -167,11 +167,11 @@ class 
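The tmqConsFromTsdb1* files above all exercise a crash/resume scenario: the consumer is killed after its first commit notify, restarted under a new consumer id, and the second consume must land strictly between 0 and the total. A condensed outline of that flow, using only the names and calls that appear in the hunks, follows; it is a sketch for orientation, not a drop-in replacement for any single test.

# Outline of the crash/resume check in the tmqConsFromTsdb1* cases.
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"],
                          showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],
                          snapshot=paraDict['snapshot'])

tdLog.info("wait commit notify")
tmqCom.getStartCommitNotifyFromTmqsim()   # block until the consumer commits once
tdLog.info("pkill consume processor")
tdCom.killProcessor("tmq_sim")            # simulate a consumer crash

tmqCom.initConsumerTable()                # re-register consume info
consumerId = 6
tmqCom.insertConsumerInfo(consumerId, expectrowcnt, topicList, keyList, ifcheckdata, ifManualCommit)

tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'], dbName=paraDict["dbName"],
                          showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],
                          snapshot=paraDict['snapshot'])

resultList = tmqCom.selectConsumeResult(1)
actConsumeTotalRows = resultList[0]
if not (0 < actConsumeTotalRows < totalRowsInserted):
    tdLog.info("act consume rows: %d" % actConsumeTotalRows)
    tdLog.exit("%d tmq consume rows error!" % consumerId)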
TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select ts, acos(c1), ceil(pow(c1,3)) from %s.%s where (sin(c2) >= 0) and (c1 %% 4 != 0) and (ts+1a >= %d) and (t4 like '%%shanghai')"%(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+math.ceil(self.rowsPerTbl/10)) # queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) @@ -179,7 +179,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] tdLog.info("select result rows: %d"%totalRowsInserted) @@ -196,7 +196,7 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("wait commit notify") tmqCom.getStartCommitNotifyFromTmqsim() # tdLog.info("wait start consume notify") @@ -204,29 +204,29 @@ class TDTestCase: tdLog.info("pkill consume processor") tdCom.killProcessor("tmq_sim") - + # time.sleep(10) - + # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 6 tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) tdLog.info("wait the consume result") - + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = resultList[0] - + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): - tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py index 3b3467f9f99807262864940deefe6e96c95a8af4..232d90848fc08c74c46308147942c90e85e7d509 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 3000 self.rowsPerTbl = 70 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], 
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -93,18 +93,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -124,17 +124,17 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 2 resultList = tmqCom.selectConsumeResult(expectRows) actConsumeTotalRows = resultList[0] + resultList[1] - + if not (totalRowsInserted == actConsumeTotalRows): tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -165,18 +165,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -192,35 +192,35 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("wait commit notify") tmqCom.getStartCommitNotifyFromTmqsim() tdLog.info("pkill consume processor") tdCom.killProcessor("tmq_sim") - + # time.sleep(10) - + # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 6 tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) tdLog.info("wait the consume result") - + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = resultList[0] - + if not (actConsumeTotalRows > 0 and 
actConsumeTotalRows < totalRowsInserted): - tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py index d1fe69f0b7acbcc374df5c08b3498c75083f0f70..5841c6d60534eef7a98c5fd6aa0abe629fc4ebb7 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 10 self.rowsPerTbl = 10000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -93,18 +93,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -124,17 +124,17 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 2 resultList = tmqCom.selectConsumeResult(expectRows) actConsumeTotalRows = resultList[0] + resultList[1] - + if not (totalRowsInserted == actConsumeTotalRows): tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -165,18 +165,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # 
sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -192,35 +192,35 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("wait commit notify") tmqCom.getStartCommitNotifyFromTmqsim() tdLog.info("pkill consume processor") tdCom.killProcessor("tmq_sim") - + # time.sleep(10) - + # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 6 tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) tdLog.info("wait the consume result") - + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = resultList[0] - + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): - tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1.py b/tests/system-test/7-tmq/tmqConsFromTsdb1.py index 597f7968ae26d17590e0b611c07539ded2990b9b..499f837ccc806240889fc867a7e31ddff1551e96 100644 --- a/tests/system-test/7-tmq/tmqConsFromTsdb1.py +++ b/tests/system-test/7-tmq/tmqConsFromTsdb1.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 1 self.ctbNum = 10 self.rowsPerTbl = 10000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -62,7 +62,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -93,18 +93,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) 
tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -124,17 +124,17 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") - + tdLog.info("wait the consume result") + expectRows = 2 resultList = tmqCom.selectConsumeResult(expectRows) actConsumeTotalRows = resultList[0] + resultList[1] - + if not (totalRowsInserted == actConsumeTotalRows): tdLog.info("sum of two consume rows: %d should be equal to total inserted rows: %d"%(actConsumeTotalRows, totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -165,18 +165,18 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + topicNameList = ['topic1'] expectRowsList = [] tmqCom.initConsumerTable() - + tdLog.info("create topics from stb with filter") queryString = "select * from %s.%s"%(paraDict['dbName'], paraDict['stbName']) # sqlString = "create topic %s as stable %s" %(topicNameList[0], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) totalRowsInserted = expectRowsList[0] @@ -192,35 +192,35 @@ class TDTestCase: tdLog.info("start consume processor 0") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("wait commit notify") tmqCom.getStartCommitNotifyFromTmqsim() tdLog.info("pkill consume processor") tdCom.killProcessor("tmq_sim") - + # time.sleep(10) - + # reinit consume info, and start tmq_sim, then check consume result tmqCom.initConsumerTable() consumerId = 6 tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) tdLog.info("wait the consume result") - + expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + actConsumeTotalRows = resultList[0] - + if not (actConsumeTotalRows > 0 and actConsumeTotalRows < totalRowsInserted): - tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) + tdLog.info("act consume rows: %d"%(actConsumeTotalRows)) tdLog.info("and second consume rows should be between 0 and %d"%(totalRowsInserted)) tdLog.exit("%d tmq consume rows error!"%consumerId) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqConsumerGroup.py b/tests/system-test/7-tmq/tmqConsumerGroup.py index bfd63fd4a25e6fc6eb207efa43a8c2a598c5b962..b5cbfb8a5115d5bc268e264ea9d9d5cdd1b6bef7 100644 --- a/tests/system-test/7-tmq/tmqConsumerGroup.py +++ b/tests/system-test/7-tmq/tmqConsumerGroup.py @@ -27,26 +27,26 @@ class TDTestCase: cmdStr = '%s/build/bin/taos -c %s -s "%s >> 
%s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() while True: dst = queryFile.readline() src = consumeFile.readline() - + if dst: if dst != src: tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) else: break - return + return def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") @@ -78,7 +78,7 @@ class TDTestCase: tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) tdLog.info("insert data") tmqCom.insert_data_2(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") # queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s" %(paraDict['dbName'], paraDict['stbName']) @@ -114,7 +114,7 @@ class TDTestCase: ifManualCommit = 1 keyList = 'group.id:cgrp1, enable.auto.commit:true, auto.commit.interval.ms:3000, auto.offset.reset:earliest' tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - + tdLog.info("start consume processor 1") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow']) @@ -132,7 +132,7 @@ class TDTestCase: pThread.join() - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 2 resultList = tmqCom.selectConsumeResult(expectRows) actTotalRows = 0 @@ -140,7 +140,7 @@ class TDTestCase: actTotalRows += resultList[i] tdSql.query(queryString) - expectRowsList.append(tdSql.getRows()) + expectRowsList.append(tdSql.getRows()) expectTotalRows = 0 for i in range(len(expectRowsList)): expectTotalRows += expectRowsList[i] @@ -150,7 +150,7 @@ class TDTestCase: tdLog.info("act consume rows: %d should >= expect consume rows: %d"%(actTotalRows, expectTotalRows)) tdLog.exit("0 tmq consume rows error!") - # time.sleep(10) + # time.sleep(10) # for i in range(len(topicNameList)): # tdSql.query("drop topic %s"%topicNameList[i]) diff --git a/tests/system-test/7-tmq/tmqDelete-1ctb.py b/tests/system-test/7-tmq/tmqDelete-1ctb.py index bedb36e505fa0f0d136d33aeeb472287d7566c54..8329b00145d71ab6df045f23fb20dc015034a2c2 100644 --- a/tests/system-test/7-tmq/tmqDelete-1ctb.py +++ b/tests/system-test/7-tmq/tmqDelete-1ctb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 1 self.rowsPerTbl = 10000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1) tdLog.info("create stb") @@ -65,11 +65,11 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # 
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return - + def delData(self,tsql,dbName,ctbPrefix,ctbNum,startTs=0,endTs=0,ctbStartIdx=0): tdLog.debug("start to del data ............") for i in range(ctbNum): @@ -84,12 +84,12 @@ class TDTestCase: newTdSql = tdCom.newTdSql() self.delData(newTdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["startTs"],paraDict["endTs"],paraDict["ctbStartIdx"]) return - + def asyncDeleteData(self, paraDict): pThread = threading.Thread(target=self.threadFunctionForDeletaData, kwargs=paraDict) pThread.start() return pThread - + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'dbt', @@ -116,29 +116,29 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - # del some data + + # del some data rowsOfDelete = int(paraDict["rowsPerTbl"] / 4) paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1 - self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"], + self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"], startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + if self.snapshot == 0: consumerId = 0 elif self.snapshot == 1: consumerId = 1 rowsOfDelete = 0 - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"]) topicList = topicFromStb1 ifcheckdata = 1 @@ -161,23 +161,23 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d, act query rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsFromQuery)) - + if self.snapshot == 0: if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error with snapshot = 0!") elif self.snapshot == 1: if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error with snapshot = 1!") - - tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete) + + tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... 
") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -197,50 +197,50 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") + + tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) - + # update to 1/4 rows and insert 3/4 new rows paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4) # paraDict['rowsPerTbl'] = self.rowsPerTbl tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # del some data rowsOfDelete = int(self.rowsPerTbl / 4 ) paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1 - self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"], + self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"], startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - consumerId = 1 - + consumerId = 1 + if self.snapshot == 0: - consumerId = 2 + consumerId = 2 expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4)) elif self.snapshot == 1: consumerId = 3 expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -252,7 +252,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -262,24 +262,24 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt)) - + if self.snapshot == 0: if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error with snapshot = 0!") elif self.snapshot == 1: if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error with 
snapshot = 1!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 2 end ...... ") def tmqCase3(self): - tdLog.printNoPrefix("======== test case 3: ") + tdLog.printNoPrefix("======== test case 3: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -299,34 +299,34 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") + + tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) - + tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - consumerId = 1 - + consumerId = 1 + if self.snapshot == 0: - consumerId = 4 + consumerId = 4 expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4)) elif self.snapshot == 1: consumerId = 5 expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -340,20 +340,20 @@ class TDTestCase: rowsOfDelete = int(self.rowsPerTbl / 4 ) paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1 pDeleteThread = self.asyncDeleteData(paraDict) - + tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + # update to 1/4 rows and insert 3/4 new rows paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4) # paraDict['rowsPerTbl'] = self.rowsPerTbl # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict) pInsertThread.join() - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -363,17 +363,17 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt)) - + if self.snapshot == 0: if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error with snapshot = 0!") elif self.snapshot == 1: if not ((totalConsumeRows >= totalRowsFromQuery) and (totalConsumeRows <= expectrowcnt)): tdLog.exit("tmq consume rows error with snapshot = 1!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) @@ -385,17 +385,17 @@ class TDTestCase: 
tdLog.printNoPrefix("=============================================") tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.snapshot = 0 - self.prepareTestEnv() + self.prepareTestEnv() self.tmqCase1() - self.tmqCase2() - + self.tmqCase2() + tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 self.prepareTestEnv() self.tmqCase1() - self.tmqCase2() - + self.tmqCase2() + tdLog.printNoPrefix("=============================================") tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.snapshot = 0 @@ -404,7 +404,7 @@ class TDTestCase: tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 - self.prepareTestEnv() + self.prepareTestEnv() self.tmqCase3() def stop(self): diff --git a/tests/system-test/7-tmq/tmqDelete-multiCtb.py b/tests/system-test/7-tmq/tmqDelete-multiCtb.py index 94ca16bc6f4b6abf260f291380e1c1f0079d5fa1..e59040305a4dbd43a1bb74012c9c6f295770352a 100644 --- a/tests/system-test/7-tmq/tmqDelete-multiCtb.py +++ b/tests/system-test/7-tmq/tmqDelete-multiCtb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 100 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1) tdLog.info("create stb") @@ -65,11 +65,11 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return - + def delData(self,tsql,dbName,ctbPrefix,ctbNum,startTs=0,endTs=0,ctbStartIdx=0): tdLog.debug("start to del data ............") for i in range(ctbNum): @@ -84,12 +84,12 @@ class TDTestCase: newTdSql = tdCom.newTdSql() self.delData(newTdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["startTs"],paraDict["endTs"],paraDict["ctbStartIdx"]) return - + def asyncDeleteData(self, paraDict): pThread = threading.Thread(target=self.threadFunctionForDeletaData, kwargs=paraDict) pThread.start() return pThread - + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") paraDict = {'dbName': 'dbt', @@ -116,29 +116,29 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - # del some data + + # del some data rowsOfDelete = int(paraDict["rowsPerTbl"] / 4) paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1 - self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"], + 
self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"], startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + if self.snapshot == 0: consumerId = 0 elif self.snapshot == 1: consumerId = 1 rowsOfDelete = 0 - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"]) topicList = topicFromStb1 ifcheckdata = 1 @@ -161,23 +161,23 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d, act query rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsFromQuery)) - + if self.snapshot == 0: if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error with snapshot = 0!") elif self.snapshot == 1: if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error with snapshot = 1!") - - # tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete) + + # tmqCom.checkFileContent(consumerId=consumerId, queryString=queryString, skipRowsOfCons=rowsOfDelete) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... ") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -197,50 +197,50 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") + + tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) - + # update to 1/4 rows and insert 3/4 new rows paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4) # paraDict['rowsPerTbl'] = self.rowsPerTbl tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # del some data rowsOfDelete = int(self.rowsPerTbl / 4 ) paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1 - self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"], + self.delData(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"], startTs=paraDict["startTs"], endTs=paraDict["endTs"],ctbStartIdx=paraDict['ctbStartIdx']) - 
+ tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - consumerId = 1 - + consumerId = 1 + if self.snapshot == 0: - consumerId = 2 + consumerId = 2 expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4)) elif self.snapshot == 1: consumerId = 3 expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -252,7 +252,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -262,24 +262,24 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt)) - + if self.snapshot == 0: if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error with snapshot = 0!") elif self.snapshot == 1: if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error with snapshot = 1!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 2 end ...... 
") def tmqCase3(self): - tdLog.printNoPrefix("======== test case 3: ") + tdLog.printNoPrefix("======== test case 3: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -299,34 +299,34 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") + + tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) - + tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - consumerId = 1 - + consumerId = 1 + if self.snapshot == 0: - consumerId = 4 + consumerId = 4 expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/4 + 3/4)) elif self.snapshot == 1: consumerId = 5 expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 - 1/4 + 1/4 + 3/4)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -340,20 +340,20 @@ class TDTestCase: rowsOfDelete = int(self.rowsPerTbl / 4 ) paraDict["endTs"] = paraDict["startTs"] + rowsOfDelete - 1 pDeleteThread = self.asyncDeleteData(paraDict) - + tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + # update to 1/4 rows and insert 3/4 new rows paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl * 3 / 4) # paraDict['rowsPerTbl'] = self.rowsPerTbl # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict) pInsertThread.join() - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -363,47 +363,47 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt)) - + if self.snapshot == 0: if totalConsumeRows < expectrowcnt: tdLog.exit("tmq consume rows error with snapshot = 0!") elif self.snapshot == 1: if not ((totalConsumeRows >= totalRowsFromQuery) and (totalConsumeRows <= expectrowcnt)): tdLog.exit("tmq consume rows error with snapshot = 1!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 3 end ...... 
") - def run(self): + def run(self): tdLog.printNoPrefix("=============================================") tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.snapshot = 0 self.prepareTestEnv() self.tmqCase1() - self.tmqCase2() - + self.tmqCase2() + tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 self.prepareTestEnv() self.tmqCase1() self.tmqCase2() - + tdLog.printNoPrefix("=============================================") tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.snapshot = 0 - self.prepareTestEnv() - self.tmqCase3() + self.prepareTestEnv() + self.tmqCase3() tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 - self.prepareTestEnv() + self.prepareTestEnv() self.tmqCase3() def stop(self): diff --git a/tests/system-test/7-tmq/tmqDnode.py b/tests/system-test/7-tmq/tmqDnode.py index 235e9ef971ba57e3bc945494e0c9b934580f8a79..802993e9247254b069383d67c050a4449ee7a1e4 100644 --- a/tests/system-test/7-tmq/tmqDnode.py +++ b/tests/system-test/7-tmq/tmqDnode.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -98,9 +98,9 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v 
--workaround-gcc296-bugs=yes ' - + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -130,7 +130,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: ctbDict[i] = 0 #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfCtb = 0 + rowsOfCtb = 0 while rowsOfCtb < rowsPerTbl: for i in range(ctbNum): sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) @@ -176,7 +176,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s_%d values "%(ctbPrefix,i) for j in range(rowsPerTbl): @@ -207,7 +207,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) for j in range(rowsPerTbl): @@ -226,8 +226,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -246,8 +246,8 @@ class TDTestCase: return def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - + tdLog.printNoPrefix("======== test case 1: ") + self.initConsumerTable() # create and start thread @@ -264,7 +264,7 @@ class TDTestCase: 'batchNum': 23, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbPrefix"], parameterDict["ctbNum"]) @@ -272,7 +272,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -303,7 +303,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -313,8 +313,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 1 end ...... 
") def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - + tdLog.printNoPrefix("======== test case 2: ") + self.initConsumerTable() # create and start thread @@ -339,7 +339,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] * 2 @@ -373,7 +373,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -387,8 +387,8 @@ class TDTestCase: # 自动建表完成数据插入,启动消费 def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - + tdLog.printNoPrefix("======== test case 3: ") + self.initConsumerTable() # create and start thread @@ -414,7 +414,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -444,7 +444,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -466,7 +466,7 @@ class TDTestCase: tdLog.info("cfgPath: %s" % cfgPath) # self.tmqCase1(cfgPath, buildPath) - # self.tmqCase2(cfgPath, buildPath) + # self.tmqCase2(cfgPath, buildPath) self.tmqCase3(cfgPath, buildPath) # self.tmqCase4(cfgPath, buildPath) # self.tmqCase5(cfgPath, buildPath) diff --git a/tests/system-test/7-tmq/tmqDnodeRestart.py b/tests/system-test/7-tmq/tmqDnodeRestart.py index 9a11106e3ea464415815dc020870f60cbaf21fb7..1902945bf6acf29bd838ab45c8629310e93570c1 100644 --- a/tests/system-test/7-tmq/tmqDnodeRestart.py +++ b/tests/system-test/7-tmq/tmqDnodeRestart.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 2 self.ctbNum = 100 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -51,7 +51,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1,wal_retention_size=-1, wal_retention_period=-1) tdLog.info("create stb") @@ -63,7 +63,7 @@ class TDTestCase: tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + tdLog.info("restart taosd to ensure that the data falls into the disk") # tdDnodes.stop(1) # tdDnodes.start(1) @@ -96,7 +96,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) # 
tdLog.info("create stb") @@ -110,12 +110,12 @@ class TDTestCase: # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + consumerId = 0 expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 topicList = topicFromStb1 @@ -129,7 +129,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + # time.sleep(3) tmqCom.getStartConsumeNotifyFromTmqsim() tdLog.info("================= restart dnode ===========================") @@ -146,7 +146,7 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQury = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d"%(totalConsumeRows, totalRowsFromQury)) if not (totalConsumeRows == totalRowsFromQury): tdLog.exit("tmq consume rows error!") @@ -157,7 +157,7 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 1 end ...... ") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -182,7 +182,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() # tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) # tdLog.info("create stb") @@ -195,12 +195,12 @@ class TDTestCase: # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + consumerId = 1 expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2 topicList = topicFromStb1 @@ -225,7 +225,7 @@ class TDTestCase: paraDict["batchNum"] = 100 paraDict["ctbPrefix"] = 'newCtb' tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"]) - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -235,7 +235,7 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, totalRowsFromQuery)) if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error!") @@ -249,7 +249,7 @@ class TDTestCase: # tdSql.prepare() self.prepareTestEnv() self.tmqCase1() - self.tmqCase2() + self.tmqCase2() def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py index 650d9188285580b649fca67cd1971ed9ae9c7041..b952dc2d572aa85e1819519d40e563086df1c4a9 100644 --- a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py 
+++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py @@ -20,11 +20,11 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 1000 self.rowsPerTbl = 10 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) - + # drop some ntbs def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") @@ -51,8 +51,8 @@ class TDTestCase: paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - + paraDict['rowsPerTbl'] = self.rowsPerTbl + tmqCom.initConsumerTable() tdLog.info("start create database....") tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) @@ -60,11 +60,11 @@ class TDTestCase: tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into normal tables....") tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"]) - + tdLog.info("create topics from database") - topicFromDb = 'topic_dbt' + topicFromDb = 'topic_dbt' tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName'])) - + if self.snapshot == 0: consumerId = 0 elif self.snapshot == 1: @@ -83,13 +83,13 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartConsumeNotifyFromTmqsim() tdLog.info("drop some ntables") # drop 1/4 ctbls from half offset paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2) paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4) tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"]) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -98,19 +98,19 @@ class TDTestCase: totalConsumeRows += resultList[i] tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - + if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)): tdLog.exit("tmq consume rows error with snapshot = 0!") - - tdLog.info("wait subscriptions exit ....") + + tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) - + tdSql.query("drop topic %s"%topicFromDb) tdLog.info("success dorp topic: %s"%topicFromDb) tdLog.printNoPrefix("======== test case 1 end ...... 
") - - + + # drop some ntbs and create some new ntbs def tmqCase2(self): tdLog.printNoPrefix("======== test case 2: ") @@ -137,8 +137,8 @@ class TDTestCase: paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - + paraDict['rowsPerTbl'] = self.rowsPerTbl + tmqCom.initConsumerTable() tdLog.info("start create database....") tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) @@ -146,11 +146,11 @@ class TDTestCase: tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into normal tables....") tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"]) - + tdLog.info("create topics from database") - topicFromDb = 'topic_dbt' + topicFromDb = 'topic_dbt' tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName'])) - + if self.snapshot == 0: consumerId = 2 elif self.snapshot == 1: @@ -169,20 +169,20 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartConsumeNotifyFromTmqsim() tdLog.info("drop some ntables") # drop 1/4 ctbls from half offset paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2) paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4) tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"]) - + tdLog.info("start create some new normal tables....") paraDict["ctbPrefix"] = 'newCtb' paraDict["ctbNum"] = self.ctbNum tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into these new normal tables....") tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"]) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -191,24 +191,24 @@ class TDTestCase: totalConsumeRows += resultList[i] tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - + if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)): tdLog.exit("tmq consume rows error with snapshot = 0!") - - tdLog.info("wait subscriptions exit ....") + + tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) - + tdSql.query("drop topic %s"%topicFromDb) tdLog.info("success dorp topic: %s"%topicFromDb) tdLog.printNoPrefix("======== test case 2 end ...... 
") - - def run(self): + + def run(self): tdLog.printNoPrefix("=============================================") tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.snapshot = 0 self.tmqCase1() - self.tmqCase2() - + self.tmqCase2() + # tdLog.printNoPrefix("====================================================================") # tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") # self.snapshot = 1 diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py index b23f42258584164b1868eaca4f884cf478bd40b0..20e363341f914b66e5ba73f0d5521b393e5743f1 100644 --- a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py @@ -20,11 +20,11 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 1000 self.rowsPerTbl = 10 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) - + # drop some ntbs def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") @@ -51,8 +51,8 @@ class TDTestCase: paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - + paraDict['rowsPerTbl'] = self.rowsPerTbl + tmqCom.initConsumerTable() tdLog.info("start create database....") tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) @@ -60,11 +60,11 @@ class TDTestCase: tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into normal tables....") tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"]) - + tdLog.info("create topics from database") - topicFromDb = 'topic_dbt' + topicFromDb = 'topic_dbt' tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName'])) - + if self.snapshot == 0: consumerId = 0 elif self.snapshot == 1: @@ -83,13 +83,13 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartConsumeNotifyFromTmqsim() tdLog.info("drop some ntables") # drop 1/4 ctbls from half offset paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2) paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4) tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"]) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -98,19 +98,19 @@ class TDTestCase: totalConsumeRows += resultList[i] tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - + if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)): tdLog.exit("tmq consume rows error with snapshot = 0!") - - tdLog.info("wait subscriptions exit ....") + + tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) - + 
tdSql.query("drop topic %s"%topicFromDb) tdLog.info("success dorp topic: %s"%topicFromDb) tdLog.printNoPrefix("======== test case 1 end ...... ") - - + + # drop some ntbs and create some new ntbs def tmqCase2(self): tdLog.printNoPrefix("======== test case 2: ") @@ -137,8 +137,8 @@ class TDTestCase: paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - + paraDict['rowsPerTbl'] = self.rowsPerTbl + tmqCom.initConsumerTable() tdLog.info("start create database....") tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) @@ -146,11 +146,11 @@ class TDTestCase: tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into normal tables....") tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"]) - + tdLog.info("create topics from database") - topicFromDb = 'topic_dbt' + topicFromDb = 'topic_dbt' tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName'])) - + if self.snapshot == 0: consumerId = 2 elif self.snapshot == 1: @@ -169,20 +169,20 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartConsumeNotifyFromTmqsim() tdLog.info("drop some ntables") # drop 1/4 ctbls from half offset paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 1 / 2) paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4) tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"]) - + tdLog.info("start create some new normal tables....") paraDict["ctbPrefix"] = 'newCtb' paraDict["ctbNum"] = self.ctbNum tmqCom.create_ntable(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_elm_list=paraDict["colSchema"], colPrefix='c', tblNum=paraDict["ctbNum"]) tdLog.info("start insert data into these new normal tables....") tmqCom.insert_rows_into_ntbl(tsql=tdSql, dbname=paraDict["dbName"], tbname_prefix=paraDict["ctbPrefix"], tbname_index_start_num = 1, column_ele_list=paraDict["colSchema"],startTs=paraDict["startTs"], tblNum=paraDict["ctbNum"], rows=paraDict["rowsPerTbl"]) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -191,24 +191,24 @@ class TDTestCase: totalConsumeRows += resultList[i] tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - + if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)): tdLog.exit("tmq consume rows error with snapshot = 0!") - - tdLog.info("wait subscriptions exit ....") + + tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) - + tdSql.query("drop topic %s"%topicFromDb) tdLog.info("success dorp topic: %s"%topicFromDb) tdLog.printNoPrefix("======== test case 2 end 
...... ") - - def run(self): + + def run(self): # tdLog.printNoPrefix("=============================================") # tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") # self.snapshot = 0 # self.tmqCase1() - # self.tmqCase2() - + # self.tmqCase2() + tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 diff --git a/tests/system-test/7-tmq/tmqDropStb.py b/tests/system-test/7-tmq/tmqDropStb.py index 2889bdc6a6797b15c981bb7fb8c05ad60856b3ea..b172224c2a1651fabf3779e07e7d0ab5706b2ba9 100644 --- a/tests/system-test/7-tmq/tmqDropStb.py +++ b/tests/system-test/7-tmq/tmqDropStb.py @@ -47,7 +47,7 @@ class TDTestCase: pollDelay = 20 showMsg = 1 - showRow = 1 + showRow = 1 hostname = socket.gethostname() @@ -59,7 +59,7 @@ class TDTestCase: def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") tdLog.info("step 1: create database, stb, ctb and insert data") - + tmqCom.initConsumerTable(self.cdbName) tdCom.create_database(tdSql,self.paraDict["dbName"],self.paraDict["dropFlag"]) @@ -69,35 +69,35 @@ class TDTestCase: tdCom.create_ctable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],tag_elm_list=self.paraDict['tagSchema'],count=self.paraDict["ctbNum"],default_ctbname_prefix=self.paraDict["ctbPrefix"]) tmqCom.insert_data_2(tdSql,self.paraDict["dbName"],self.paraDict["ctbPrefix"],self.paraDict["ctbNum"],self.paraDict["rowsPerTbl"],self.paraDict["batchNum"],self.paraDict["startTs"],self.paraDict["ctbStartIdx"]) # pThread1 = tmqCom.asyncInsertData(paraDict=self.paraDict) - + self.paraDict["stbName"] = 'stb2' self.paraDict["ctbPrefix"] = 'newctb' self.paraDict["batchNum"] = 10000 tdCom.create_stable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],column_elm_list=self.paraDict["colSchema"],tag_elm_list=self.paraDict["tagSchema"],count=1, default_stbname_prefix=self.paraDict["stbName"]) tdCom.create_ctable(tdSql,dbname=self.paraDict["dbName"],stbname=self.paraDict["stbName"],tag_elm_list=self.paraDict['tagSchema'],count=self.paraDict["ctbNum"],default_ctbname_prefix=self.paraDict["ctbPrefix"]) # tmqCom.insert_data_2(tdSql,self.paraDict["dbName"],self.paraDict["ctbPrefix"],self.paraDict["ctbNum"],self.paraDict["rowsPerTbl"],self.paraDict["batchNum"],self.paraDict["startTs"],self.paraDict["ctbStartIdx"]) - pThread2 = tmqCom.asyncInsertData(paraDict=self.paraDict) + pThread2 = tmqCom.asyncInsertData(paraDict=self.paraDict) tdLog.info("create topics from db") - topicName1 = 'UpperCasetopic_%s'%(self.paraDict['dbName']) + topicName1 = 'UpperCasetopic_%s'%(self.paraDict['dbName']) tdSql.execute("create topic %s as database %s" %(topicName1, self.paraDict['dbName'])) - + topicList = topicName1 + ',' +topicName1 keyList = '%s,%s,%s,%s'%(self.groupId,self.autoCommit,self.autoCommitInterval,self.autoOffset) self.expectrowcnt = self.paraDict["rowsPerTbl"] * self.paraDict["ctbNum"] * 2 tmqCom.insertConsumerInfo(self.consumerId, self.expectrowcnt,topicList,keyList,self.ifcheckdata,self.ifManualCommit) - - tdLog.info("start consume processor") + + tdLog.info("start consume processor") tmqCom.startTmqSimProcess(self.pollDelay,self.paraDict["dbName"],self.showMsg, self.showRow,self.cdbName) - - tmqCom.getStartConsumeNotifyFromTmqsim() + + tmqCom.getStartConsumeNotifyFromTmqsim() tdLog.info("drop one stable") - self.paraDict["stbName"] = 'stb1' - tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], 
self.paraDict['stbName'])) + self.paraDict["stbName"] = 'stb1' + tdSql.execute("drop table %s.%s" %(self.paraDict['dbName'], self.paraDict['stbName'])) # tmqCom.drop_ctable(tdSql, dbname=self.paraDict['dbName'], count=self.paraDict["ctbNum"], default_ctbname_prefix=self.paraDict["ctbPrefix"]) pThread2.join() - + tdLog.info("wait result from consumer, then check it") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -105,7 +105,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if not (totalConsumeRows >= self.expectrowcnt/2 and totalConsumeRows <= self.expectrowcnt): tdLog.info("act consume rows: %d, expect consume rows: between %d and %d"%(totalConsumeRows, self.expectrowcnt/2, self.expectrowcnt)) tdLog.exit("tmq consume rows error!") diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py index f86d1295f4ff774473e56079d5b3d00b6ae0d2d1..992a128ac077a35708a1ef123ba61bf3352feb78 100644 --- a/tests/system-test/7-tmq/tmqDropStbCtb.py +++ b/tests/system-test/7-tmq/tmqDropStbCtb.py @@ -20,11 +20,11 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 100 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) - + def prepareTestEnv(self): tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") paraDict = {'dbName': 'dbt', @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -65,11 +65,11 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return - + # drop some ctbs def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: ") @@ -96,10 +96,10 @@ class TDTestCase: paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - + paraDict['rowsPerTbl'] = self.rowsPerTbl + tmqCom.initConsumerTable() - + # again create one new stb1 paraDict["stbName"] = 'stb1' paraDict['ctbPrefix'] = 'ctb1n_' @@ -112,16 +112,16 @@ class TDTestCase: # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict) - + tdLog.info("create topics from database") - topicFromDb = 'topic_dbt' + topicFromDb = 'topic_dbt' tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName'])) - + if self.snapshot == 0: consumerId = 0 elif self.snapshot == 1: consumerId = 1 - + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2) topicList = topicFromDb ifcheckdata = 1 @@ -135,17 +135,17 @@ class TDTestCase: tdLog.info("start consume processor") 
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartConsumeNotifyFromTmqsim() tdLog.info("drop some ctables") - paraDict["stbName"] = 'stb' + paraDict["stbName"] = 'stb' paraDict['ctbPrefix'] = 'ctb' paraDict["ctbStartIdx"] = paraDict["ctbStartIdx"] + int(paraDict["ctbNum"] * 3 / 4) # drop 1/4 ctbls paraDict["ctbNum"] = int(paraDict["ctbNum"] / 4) - # tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName'])) + # tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName'])) tmqCom.drop_ctable(tdSql, dbname=paraDict['dbName'], count=paraDict["ctbNum"], default_ctbname_prefix=paraDict["ctbPrefix"], ctbStartIdx=paraDict["ctbStartIdx"]) - + pInsertThread.join() - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -154,17 +154,17 @@ class TDTestCase: totalConsumeRows += resultList[i] tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - + if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)): tdLog.exit("tmq consume rows error with snapshot = 0!") - tdLog.info("wait subscriptions exit ....") + tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) - + tdSql.query("drop topic %s"%topicFromDb) tdLog.info("success dorp topic: %s"%topicFromDb) tdLog.printNoPrefix("======== test case 1 end ...... ") - + # drop one stb def tmqCase2(self): tdLog.printNoPrefix("======== test case 2: ") @@ -191,10 +191,10 @@ class TDTestCase: paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - + paraDict['rowsPerTbl'] = self.rowsPerTbl + tmqCom.initConsumerTable() - + # again create one new stb1 paraDict["stbName"] = 'stb2' paraDict['ctbPrefix'] = 'ctb2n_' @@ -207,16 +207,16 @@ class TDTestCase: # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) pInsertThread = tmqCom.asyncInsertDataByInterlace(paraDict) - + tdLog.info("create topics from database") - topicFromDb = 'topic_dbt' + topicFromDb = 'topic_dbt' tdSql.execute("create topic %s as database %s" %(topicFromDb, paraDict['dbName'])) - + if self.snapshot == 0: consumerId = 2 elif self.snapshot == 1: consumerId = 3 - + expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2) topicList = topicFromDb ifcheckdata = 1 @@ -230,13 +230,13 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - tmqCom.getStartConsumeNotifyFromTmqsim() + tmqCom.getStartConsumeNotifyFromTmqsim() tdLog.info("drop one stable") paraDict["stbName"] = 'stb1' - tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName'])) + tdSql.execute("drop table %s.%s" %(paraDict['dbName'], paraDict['stbName'])) pInsertThread.join() - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -245,25 +245,25 @@ class TDTestCase: totalConsumeRows += resultList[i] tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) - + if not 
((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)): tdLog.exit("tmq consume rows error with snapshot = 0!") - tdLog.info("wait subscriptions exit ....") + tdLog.info("wait subscriptions exit ....") tmqCom.waitSubscriptionExit(tdSql, topicFromDb) - + tdSql.query("drop topic %s"%topicFromDb) tdLog.info("success dorp topic: %s"%topicFromDb) tdLog.printNoPrefix("======== test case 2 end ...... ") - def run(self): + def run(self): tdLog.printNoPrefix("=============================================") tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.snapshot = 0 self.prepareTestEnv() self.tmqCase1() - self.tmqCase2() - + self.tmqCase2() + tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 diff --git a/tests/system-test/7-tmq/tmqError.py b/tests/system-test/7-tmq/tmqError.py index bd8ec565d8e7ab59a3d576c73f516c3b91de328d..9afcfaf9681ad44e5b0a704458eee1dfc1c279f1 100644 --- a/tests/system-test/7-tmq/tmqError.py +++ b/tests/system-test/7-tmq/tmqError.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -97,14 +97,14 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + if (platform.system().lower() == 'windows'): shellCmd = 'mintty -h never -w hide ' + buildPath + '\\build\\bin\\tmq_sim.exe -c ' + 
cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) - shellCmd += "> nul 2>&1 &" + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += "> nul 2>&1 &" else: shellCmd = 'nohup ' + buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -134,7 +134,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s_%d values "%(stbName,i) for j in range(rowsPerTbl): @@ -168,8 +168,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -189,11 +189,11 @@ class TDTestCase: def tmqCase1(self, cfgPath, buildPath): ''' - Leave a TMQ process. Stop taosd, delete the data directory, restart taosd, + Leave a TMQ process. Stop taosd, delete the data directory, restart taosd, and restart a consumption process to complete a consumption ''' tdLog.printNoPrefix("======== test case 1: ") - + self.initConsumerTable() # create and start thread @@ -217,7 +217,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 # expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -236,7 +236,7 @@ class TDTestCase: showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - + time.sleep(3) tdLog.info("================= stop dnode, and remove data file, then start dnode ===========================") tdDnodes.stop(1) @@ -248,7 +248,7 @@ class TDTestCase: tdDnodes.start(1) time.sleep(2) - ######### redo to consume + ######### redo to consume self.initConsumerTable() self.create_database(tdSql, parameterDict["dbName"]) @@ -258,7 +258,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -282,7 +282,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) if not (totalConsumeRows == expectrowcnt): tdLog.exit("tmq consume rows error!") @@ -293,7 +293,7 @@ class TDTestCase: else: os.system('pkill tmq_sim') - tdLog.printNoPrefix("======== test case 1 end ...... ") + tdLog.printNoPrefix("======== test case 1 end ...... 
") def run(self): tdSql.prepare() diff --git a/tests/system-test/7-tmq/tmqModule.py b/tests/system-test/7-tmq/tmqModule.py index 086fde8f05a9e798a3a1219d4053cf2808bb0b25..1ff47da15922c1ebc51c9ee72a2e155eaac81d4f 100644 --- a/tests/system-test/7-tmq/tmqModule.py +++ b/tests/system-test/7-tmq/tmqModule.py @@ -56,7 +56,7 @@ class TDTestCase: print(cur) return cur - def initConsumerTable(self,cdbName='cdb'): + def initConsumerTable(self,cdbName='cdb'): tdLog.info("create consume database, and consume info table, and consume result table") tdSql.query("create database if not exists %s vgroups 1"%(cdbName)) tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) @@ -65,12 +65,12 @@ class TDTestCase: tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName) - def initConsumerInfoTable(self,cdbName='cdb'): + def initConsumerInfoTable(self,cdbName='cdb'): tdLog.info("drop consumeinfo table") tdSql.query("drop table if exists %s.consumeinfo "%(cdbName)) tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName) - def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): + def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'): sql = "insert into %s.consumeinfo values "%cdbName sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit) tdLog.info("consume info sql: %s"%sql) @@ -85,11 +85,11 @@ class TDTestCase: break else: time.sleep(5) - + for i in range(expectRows): tdLog.info ("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3))) resultList.append(tdSql.getData(i , 3)) - + return resultList def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0): @@ -98,9 +98,9 @@ class TDTestCase: logFile = cfgPath + '/../log/valgrind-tmq.log' shellCmd = 'nohup valgrind --log-file=' + logFile shellCmd += '--tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all --num-callers=20 -v --workaround-gcc296-bugs=yes ' - + shellCmd += buildPath + '/build/bin/tmq_sim -c ' + cfgPath - shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) + shellCmd += " -y %d -d %s -g %d -r %d -w %s "%(pollDelay, dbName, showMsg, showRow, cdbName) shellCmd += "> /dev/null 2>&1 &" tdLog.info(shellCmd) os.system(shellCmd) @@ -130,7 +130,7 @@ class TDTestCase: sql = pre_create if sql != pre_create: tsql.execute(sql) - + tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName)) return @@ -149,7 +149,7 @@ class TDTestCase: ctbDict[i] = 0 #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfCtb = 0 + rowsOfCtb = 0 while rowsOfCtb < rowsPerTbl: for i in range(ctbNum): sql += " %s.%s_%d values "%(dbName,ctbPrefix,i) @@ -176,7 +176,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += 
" %s_%d values "%(ctbPrefix,i) for j in range(rowsPerTbl): @@ -207,7 +207,7 @@ class TDTestCase: startTs = int(round(t * 1000)) #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows)) - rowsOfSql = 0 + rowsOfSql = 0 for i in range(ctbNum): sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i,dbName,stbName,i) for j in range(rowsPerTbl): @@ -226,8 +226,8 @@ class TDTestCase: tsql.execute(sql) tdLog.debug("insert data ............ [OK]") return - - def prepareEnv(self, **parameterDict): + + def prepareEnv(self, **parameterDict): # create new connector for my thread tsql=self.newcur(parameterDict['cfg'], 'localhost', 6030) @@ -246,8 +246,8 @@ class TDTestCase: return def tmqCase1(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 1: ") - + tdLog.printNoPrefix("======== test case 1: ") + self.initConsumerTable() # create and start thread @@ -263,7 +263,7 @@ class TDTestCase: 'batchNum': 33, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -271,7 +271,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -296,7 +296,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -306,8 +306,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 1 end ...... ") def tmqCase2(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 2: ") - + tdLog.printNoPrefix("======== test case 2: ") + self.initConsumerTable() # create and start thread @@ -343,7 +343,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -392,7 +392,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -402,8 +402,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 2 end ...... 
") def tmqCase3(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 3: ") - + tdLog.printNoPrefix("======== test case 3: ") + self.initConsumerTable() # create and start thread @@ -427,7 +427,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -445,7 +445,7 @@ class TDTestCase: showMsg = 1 showRow = 1 self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow) - + time.sleep(5) tdLog.info("drop som child table of stb1") dropTblNum = 4 @@ -460,7 +460,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + remaindrowcnt = parameterDict["rowsPerTbl"] * (parameterDict["ctbNum"] - dropTblNum) if not (totalConsumeRows < expectrowcnt and totalConsumeRows > remaindrowcnt): @@ -473,7 +473,7 @@ class TDTestCase: def tmqCase4(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 4: ") - + self.initConsumerTable() # create and start thread @@ -489,7 +489,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -502,7 +502,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -527,7 +527,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -545,7 +545,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -556,7 +556,7 @@ class TDTestCase: def tmqCase5(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 5: ") - + self.initConsumerTable() # create and start thread @@ -572,7 +572,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -585,7 +585,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -610,7 +610,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 
expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -628,7 +628,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != (expectrowcnt * (1 + 1/4)): tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -639,7 +639,7 @@ class TDTestCase: def tmqCase6(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 6: ") - + self.initConsumerTable() # create and start thread @@ -655,7 +655,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -668,7 +668,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -693,7 +693,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -715,7 +715,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -726,7 +726,7 @@ class TDTestCase: def tmqCase7(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 7: ") - + self.initConsumerTable() # create and start thread @@ -742,7 +742,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -755,7 +755,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -780,7 +780,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -798,7 +798,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -808,8 +808,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 7 end ...... 
") def tmqCase8(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 8: ") - + tdLog.printNoPrefix("======== test case 8: ") + self.initConsumerTable() # create and start thread @@ -838,7 +838,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -863,7 +863,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -883,7 +883,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -903,7 +903,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt*2: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) tdLog.exit("tmq consume rows error!") @@ -913,8 +913,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 8 end ...... ") def tmqCase9(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 9: ") - + tdLog.printNoPrefix("======== test case 9: ") + self.initConsumerTable() # create and start thread @@ -943,7 +943,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -968,7 +968,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -992,7 +992,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -1012,7 +1012,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt*2: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) tdLog.exit("tmq consume rows error!") @@ -1022,8 +1022,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 9 end ...... 
") def tmqCase10(self, cfgPath, buildPath): - tdLog.printNoPrefix("======== test case 10: ") - + tdLog.printNoPrefix("======== test case 10: ") + self.initConsumerTable() # create and start thread @@ -1052,7 +1052,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -1077,7 +1077,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -1101,7 +1101,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt-10000: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt-10000)) tdLog.exit("tmq consume rows error!") @@ -1125,7 +1125,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt*2: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*2)) tdLog.exit("tmq consume rows error!") @@ -1136,7 +1136,7 @@ class TDTestCase: def tmqCase11(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 11: ") - + self.initConsumerTable() # create and start thread @@ -1152,7 +1152,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -1165,7 +1165,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -1190,7 +1190,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -1212,7 +1212,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != 0: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, 0)) tdLog.exit("tmq consume rows error!") @@ -1223,7 +1223,7 @@ class TDTestCase: def tmqCase12(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 12: ") - + self.initConsumerTable() # create and start thread @@ -1239,7 +1239,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -1252,7 +1252,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select 
ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -1277,7 +1277,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -1299,7 +1299,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -1310,7 +1310,7 @@ class TDTestCase: def tmqCase13(self, cfgPath, buildPath): tdLog.printNoPrefix("======== test case 13: ") - + self.initConsumerTable() # create and start thread @@ -1326,7 +1326,7 @@ class TDTestCase: 'batchNum': 100, \ 'startTs': 1640966400000} # 2022-01-01 00:00:00.000 parameterDict['cfg'] = cfgPath - + self.create_database(tdSql, parameterDict["dbName"]) self.create_stable(tdSql, parameterDict["dbName"], parameterDict["stbName"]) self.create_ctables(tdSql, parameterDict["dbName"], parameterDict["stbName"], parameterDict["ctbNum"]) @@ -1339,7 +1339,7 @@ class TDTestCase: tdLog.info("create topics from stb1") topicFromStb1 = 'topic_stb1' - + tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, parameterDict['dbName'], parameterDict['stbName'])) consumerId = 0 expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] @@ -1364,7 +1364,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt/4: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt/4)) tdLog.exit("tmq consume rows error!") @@ -1387,7 +1387,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt*(1/2+1/4): tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt*(1/2+1/4))) tdLog.exit("tmq consume rows error!") @@ -1410,7 +1410,7 @@ class TDTestCase: totalConsumeRows = 0 for i in range(expectRows): totalConsumeRows += resultList[i] - + if totalConsumeRows != expectrowcnt: tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt)) tdLog.exit("tmq consume rows error!") @@ -1431,7 +1431,7 @@ class TDTestCase: tdLog.info("cfgPath: %s" % cfgPath) self.tmqCase1(cfgPath, buildPath) - # self.tmqCase2(cfgPath, buildPath) + # self.tmqCase2(cfgPath, buildPath) # self.tmqCase3(cfgPath, buildPath) # self.tmqCase4(cfgPath, buildPath) # self.tmqCase5(cfgPath, buildPath) diff --git a/tests/system-test/7-tmq/tmqSubscribeStb-r3.py b/tests/system-test/7-tmq/tmqSubscribeStb-r3.py index 6461ee9644e926193086c35b94f1ab1f38b7f553..c5f98bc3a004e8aa20918fe1482e38ef10705416 100644 --- a/tests/system-test/7-tmq/tmqSubscribeStb-r3.py +++ b/tests/system-test/7-tmq/tmqSubscribeStb-r3.py @@ -39,20 +39,20 @@ class TDTestCase: cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, 
mode='r') - + # skip first line for it is schema queryFile.readline() while True: dst = queryFile.readline() src = consumeFile.readline() - + if dst: if dst != src: tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) @@ -85,7 +85,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=self.replica) tdLog.info("create stb") @@ -101,7 +101,7 @@ class TDTestCase: # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tmqCom.asyncInsertDataByInterlace(paraDict) - + tdLog.info("wait some data inserted") exitFlag = 1 while exitFlag: @@ -112,7 +112,7 @@ class TDTestCase: if (rowsInserted > ((self.ctbNum * self.rowsPerTbl)/5)): exitFlag = 0 time.sleep(0.1) - + tdLog.info("inserted rows: %d"%tdSql.getData(0,0)) # tdDnodes=cluster.dnodes tdLog.info("================= restart dnode 2===========================") @@ -123,18 +123,18 @@ class TDTestCase: cluster.dnodes[2].starttaosd() tdLog.info("================= restart dnode 4===========================") cluster.dnodes[3].stoptaosd() - cluster.dnodes[3].starttaosd() + cluster.dnodes[3].starttaosd() tdLog.info("================= restart dnode 5===========================") cluster.dnodes[4].stoptaosd() cluster.dnodes[4].starttaosd() - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return def tmqCase1(self): - tdLog.printNoPrefix("======== test case 1: ") - + tdLog.printNoPrefix("======== test case 1: ") + # create and start thread paraDict = {'dbName': 'dbt', 'dropFlag': 1, @@ -159,14 +159,14 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + consumerId = 0 expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] topicList = topicFromStb1 @@ -178,9 +178,9 @@ class TDTestCase: auto.offset.reset:earliest' tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") + tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -190,13 +190,13 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt)) - + if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) 
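[Editor's note on the check pattern in the hunk above] The checkFileContent call left commented out here is the file-level variant of the row-count check the test does keep: tmq_sim writes every consumed row to consumerid_<id>.txt, and the helper diffs that file line by line against a dump of the topic's query produced through the taos shell, whose first line is the schema header and is skipped. A minimal standalone sketch of that comparison, assuming the two file layouts shown in the hunks above; the function name is illustrative and not part of the framework, and it raises instead of calling tdLog.exit so it stays framework-free:

    # Sketch only: consumed-rows vs direct-query comparison, one row per line
    # in both files, schema header as the first line of the query dump.
    def check_consume_rows(consume_rows_file, query_dump_file):
        with open(query_dump_file, mode='r') as query_file, \
             open(consume_rows_file, mode='r') as consume_file:
            query_file.readline()        # skip the schema header line
            matched = 0
            while True:
                dst = query_file.readline()
                src = consume_file.readline()
                if not dst:
                    return matched       # query output exhausted, all rows matched
                if dst != src:
                    raise RuntimeError("row %d consumed by tmq_sim does not match "
                                       "the row returned by direct query" % (matched + 1))
                matched += 1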
tmqCom.waitSubscriptionExit(tdSql, topicFromStb1) tdSql.query("drop topic %s"%topicFromStb1) @@ -204,8 +204,8 @@ class TDTestCase: tdLog.printNoPrefix("======== test case 1 end ...... ") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") - + tdLog.printNoPrefix("======== test case 2: ") + # create and start thread paraDict = {'dbName': 'dbt', 'dropFlag': 1, @@ -230,14 +230,14 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + consumerId = 0 expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] topicList = topicFromStb1 @@ -249,7 +249,7 @@ class TDTestCase: auto.offset.reset:earliest' tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit) - tdLog.info("start consume processor") + tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) tdLog.info("================= restart dnode 2===========================") @@ -260,11 +260,11 @@ class TDTestCase: cluster.dnodes[2].starttaosd() tdLog.info("================= restart dnode 4===========================") cluster.dnodes[3].stoptaosd() - cluster.dnodes[3].starttaosd() + cluster.dnodes[3].starttaosd() tdLog.info("================= restart dnode 5===========================") cluster.dnodes[4].stoptaosd() cluster.dnodes[4].starttaosd() - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -274,13 +274,13 @@ class TDTestCase: tdSql.query(queryString) totalRowsFromQuery = tdSql.getRows() - + tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt)) - + if totalConsumeRows != totalRowsFromQuery: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tmqCom.waitSubscriptionExit(tdSql, topicFromStb1) tdSql.query("drop topic %s"%topicFromStb1) diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py index 84db8e840b2b4187abe6473a9b651a5af2220b22..65515c48221393dc28261dc8e8b05a315274eae5 100644 --- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py +++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py @@ -22,12 +22,12 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 100 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - + def prepare_udf_so(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -66,20 +66,20 @@ class TDTestCase: cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = 
open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() while True: dst = queryFile.readline() src = consumeFile.readline() - + if dst: if dst != src: tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) @@ -112,7 +112,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -127,11 +127,11 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return - + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: multi sub table") paraDict = {'dbName': 'dbt', @@ -168,13 +168,13 @@ class TDTestCase: # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) # tdLog.info("insert data") # tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -190,10 +190,10 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") @@ -208,7 +208,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[1], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 1 @@ -218,7 +218,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -228,12 +228,12 @@ class TDTestCase: # self.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) - time.sleep(10) + time.sleep(10) for i in 
range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) tdLog.printNoPrefix("======== test case 1 end ...... ") - + def tmqCase2(self): tdLog.printNoPrefix("======== test case 2: multi sub table, consume with auto create tble and insert data") paraDict = {'dbName': 'dbt', @@ -270,13 +270,13 @@ class TDTestCase: # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) # tdLog.info("insert data") # tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - # tdSql.query(queryString) + # tdSql.query(queryString) # expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -292,18 +292,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl) + paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl) tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - - tdSql.query(queryString) + + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("2 tmq consume rows error!") @@ -318,7 +318,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[1], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 3 @@ -328,7 +328,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -338,7 +338,7 @@ class TDTestCase: # self.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -348,13 +348,13 @@ class TDTestCase: # tdSql.prepare() self.prepare_udf_so() self.create_udf_function() - + tdLog.printNoPrefix("=============================================") tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.prepareTestEnv() self.tmqCase1() self.tmqCase2() - + # 
tdLog.printNoPrefix("====================================================================") # tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") # self.prepareTestEnv() diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py index 5b964e1d38b9edf698d736507ab8fc4658746adb..1c999b86c7a044a84f1eb42d42084c2d8aa66aca 100644 --- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py @@ -22,12 +22,12 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 100 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - + def prepare_udf_so(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -66,20 +66,20 @@ class TDTestCase: cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() while True: dst = queryFile.readline() src = consumeFile.readline() - + if dst: if dst != src: tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) @@ -112,7 +112,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -127,11 +127,11 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return - + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: multi sub table") paraDict = {'dbName': 'dbt', @@ -168,13 +168,13 @@ class TDTestCase: # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) # tdLog.info("insert data") # tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -190,10 +190,10 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], 
paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") @@ -208,7 +208,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[1], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 1 @@ -218,7 +218,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -228,12 +228,12 @@ class TDTestCase: # self.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) tdLog.printNoPrefix("======== test case 1 end ...... ") - + def tmqCase2(self): tdLog.printNoPrefix("======== test case 2: multi sub table, consume with auto create tble and insert data") paraDict = {'dbName': 'dbt', @@ -270,13 +270,13 @@ class TDTestCase: # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) # tdLog.info("insert data") # tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - # tdSql.query(queryString) + # tdSql.query(queryString) # expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -292,18 +292,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl) + paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl) tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - - tdSql.query(queryString) + + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("2 tmq consume rows error!") @@ -318,7 +318,7 @@ class TDTestCase: sqlString = "create topic %s as %s" 
%(topicNameList[1], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 3 @@ -328,7 +328,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -338,7 +338,7 @@ class TDTestCase: # self.checkFileContent(consumerId, queryString) # tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -348,13 +348,13 @@ class TDTestCase: # tdSql.prepare() self.prepare_udf_so() self.create_udf_function() - + # tdLog.printNoPrefix("=============================================") # tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") # self.prepareTestEnv() # self.tmqCase1() # self.tmqCase2() - + tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.prepareTestEnv() diff --git a/tests/system-test/7-tmq/tmqUdf.py b/tests/system-test/7-tmq/tmqUdf.py index 04067ccf65769d7d831fc190a9761ba52459128f..6e1843404e655ab3f721eb9afefbb1cee9336211 100644 --- a/tests/system-test/7-tmq/tmqUdf.py +++ b/tests/system-test/7-tmq/tmqUdf.py @@ -22,12 +22,12 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 1 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) #tdSql.init(conn.cursor(), logSql) # output sql.txt file - + def prepare_udf_so(self): selfPath = os.path.dirname(os.path.realpath(__file__)) @@ -66,20 +66,20 @@ class TDTestCase: cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile) tdLog.info(cmdStr) os.system(cmdStr) - + consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId) tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile)) consumeFile = open(consumeRowsFile, mode='r') queryFile = open(dstFile, mode='r') - + # skip first line for it is schema queryFile.readline() while True: dst = queryFile.readline() src = consumeFile.readline() - + if dst: if dst != src: tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId) @@ -112,7 +112,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -127,11 +127,11 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return - + def tmqCase1(self): tdLog.printNoPrefix("======== test case 1: one 
sub table") paraDict = {'dbName': 'dbt', @@ -168,13 +168,13 @@ class TDTestCase: # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) # tdLog.info("insert data") # tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -190,10 +190,10 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("0 tmq consume rows error!") @@ -209,7 +209,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[1], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 1 @@ -219,7 +219,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -229,12 +229,12 @@ class TDTestCase: self.checkFileContent(consumerId, queryString) tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) tdLog.printNoPrefix("======== test case 1 end ...... 
") - + def tmqCase2(self): tdLog.printNoPrefix("======== test case 2: one sub table, consume with auto create tble and insert data") paraDict = {'dbName': 'dbt', @@ -271,13 +271,13 @@ class TDTestCase: # tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix']) # tdLog.info("insert data") # tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"]) - + tdLog.info("create topics from stb with filter") queryString = "select ts,c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicNameList[0], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - # tdSql.query(queryString) + # tdSql.query(queryString) # expectRowsList.append(tdSql.getRows()) # init consume info, and start tmq_sim, then check consume result @@ -293,18 +293,18 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl) + paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl) tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) - - tdSql.query(queryString) + + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) - + if expectRowsList[0] != resultList[0]: tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0])) tdLog.exit("2 tmq consume rows error!") @@ -319,7 +319,7 @@ class TDTestCase: sqlString = "create topic %s as %s" %(topicNameList[1], queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - tdSql.query(queryString) + tdSql.query(queryString) expectRowsList.append(tdSql.getRows()) consumerId = 3 @@ -329,7 +329,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'],snapshot=paraDict['snapshot']) - tdLog.info("wait the consume result") + tdLog.info("wait the consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) if expectRowsList[1] != resultList[0]: @@ -339,7 +339,7 @@ class TDTestCase: self.checkFileContent(consumerId, queryString) tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId)) - time.sleep(10) + time.sleep(10) for i in range(len(topicNameList)): tdSql.query("drop topic %s"%topicNameList[i]) @@ -349,13 +349,13 @@ class TDTestCase: # tdSql.prepare() self.prepare_udf_so() self.create_udf_function() - + tdLog.printNoPrefix("=============================================") tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.prepareTestEnv() self.tmqCase1() self.tmqCase2() - + tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") 
self.prepareTestEnv() diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py index 5d891bdedfd5763da3de8fff46e6080e93071aa5..12de04cd9c525e4ae8bc9aaa70d45942e7c3c0e2 100644 --- a/tests/system-test/7-tmq/tmqUpdate-1ctb.py +++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 1 self.rowsPerTbl = 10000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -65,8 +65,8 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return @@ -96,7 +96,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # update to half tables paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", @@ -104,24 +104,24 @@ class TDTestCase: # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 0 - + if self.snapshot == 0: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -143,18 +143,18 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted)) if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") - - tmqCom.checkFileContent(consumerId, queryString) + + tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... 
") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -174,15 +174,15 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") + + tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) - + # update to half tables paraDict['startTs'] = paraDict['startTs'] + int(self.rowsPerTbl / 2) paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -191,16 +191,16 @@ class TDTestCase: startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) # tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 1 @@ -208,7 +208,7 @@ class TDTestCase: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -220,7 +220,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("insert process end, and start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -230,13 +230,13 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt)) - + if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) @@ -249,14 +249,14 @@ class TDTestCase: tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.tmqCase1() self.tmqCase2() - + self.prepareTestEnv() tdLog.printNoPrefix("====================================================================") tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 self.tmqCase1() self.tmqCase2() - + def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py index 3b6ae65316ecdac40c085b55681f9f9b9f9fe381..02641d8bcb0603c7e2caf3e2486a577e62176269 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py +++ 
b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py @@ -21,7 +21,7 @@ class TDTestCase: self.ctbNum = 50 self.rowsPerTbl = 1000 self.autoCtbPrefix = 'aCtb' - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -51,7 +51,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -66,8 +66,8 @@ class TDTestCase: tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix, ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return @@ -96,7 +96,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # update to half tables paraDict['ctbNum'] = int(self.ctbNum/2) paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -105,15 +105,15 @@ class TDTestCase: startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 0 @@ -121,7 +121,7 @@ class TDTestCase: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -143,18 +143,18 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted)) if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... 
") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -174,15 +174,15 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") + + tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) - + # update to half tables paraDict['ctbNum'] = int(self.ctbNum/2) paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -190,23 +190,23 @@ class TDTestCase: tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix, ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) - + tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="aCtby", ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) - + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl @@ -215,7 +215,7 @@ class TDTestCase: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -227,7 +227,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -237,13 +237,13 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt)) - + if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) @@ -256,14 +256,14 @@ class TDTestCase: tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.tmqCase1() self.tmqCase2() - + # self.prepareTestEnv() # tdLog.printNoPrefix("====================================================================") # 
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") # self.snapshot = 1 # self.tmqCase1() # self.tmqCase2() - + def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py index 0ac897c2cd4622d02dd2832b0854ba5b28e74315..baeb70e65680aa731d9db4eb28385b24e67d9a47 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py @@ -21,7 +21,7 @@ class TDTestCase: self.ctbNum = 50 self.rowsPerTbl = 1000 self.autoCtbPrefix = 'aCtb' - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -51,7 +51,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -66,8 +66,8 @@ class TDTestCase: tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix, ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return @@ -96,7 +96,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # update to half tables paraDict['ctbNum'] = int(self.ctbNum/2) paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -105,15 +105,15 @@ class TDTestCase: startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 0 @@ -121,7 +121,7 @@ class TDTestCase: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -143,18 +143,18 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted)) if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... 
") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -174,15 +174,15 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") + + tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) - + # update to half tables paraDict['ctbNum'] = int(self.ctbNum/2) paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -190,23 +190,23 @@ class TDTestCase: tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix, ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) - + tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="aCtby", ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) - + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl @@ -215,7 +215,7 @@ class TDTestCase: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -227,7 +227,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -237,13 +237,13 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt)) - + if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) @@ -256,14 +256,14 @@ class TDTestCase: # tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") # self.tmqCase1() # self.tmqCase2() - + self.prepareTestEnv() tdLog.printNoPrefix("====================================================================") 
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 self.tmqCase1() self.tmqCase2() - + def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py index 892137be43a39ea0889143fd679ec38f20489928..bde266e63401f27e1ff9bfb1085fc26636e0bc77 100644 --- a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py +++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py @@ -21,7 +21,7 @@ class TDTestCase: self.ctbNum = 50 self.rowsPerTbl = 1000 self.autoCtbPrefix = 'aCtb' - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -51,7 +51,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1) tdLog.info("create stb") @@ -66,8 +66,8 @@ class TDTestCase: tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix, ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") # tdSql.query("flush database %s"%(paraDict['dbName'])) return @@ -96,7 +96,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + # update to half tables paraDict['ctbNum'] = int(self.ctbNum/2) paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -105,15 +105,15 @@ class TDTestCase: startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 0 @@ -121,7 +121,7 @@ class TDTestCase: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -143,18 +143,18 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted)) if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... 
") def tmqCase2(self): - tdLog.printNoPrefix("======== test case 2: ") + tdLog.printNoPrefix("======== test case 2: ") paraDict = {'dbName': 'dbt', 'dropFlag': 1, 'event': '', @@ -174,15 +174,15 @@ class TDTestCase: 'showMsg': 1, 'showRow': 1, 'snapshot': 0} - + paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - - tdLog.info("restart taosd to ensure that the data falls into the disk") + + tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) - + # update to half tables paraDict['ctbNum'] = int(self.ctbNum/2) paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) @@ -190,23 +190,23 @@ class TDTestCase: tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=self.autoCtbPrefix, ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) - + tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="aCtby", ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) - + tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']+int(self.ctbNum/2)) tmqCom.initConsumerTable() tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) tdSql.execute(sqlString) - + # paraDict['ctbNum'] = self.ctbNum paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl @@ -215,7 +215,7 @@ class TDTestCase: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2*2 + 1/2*1/2)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2 + 1/2*1/2)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -227,7 +227,7 @@ class TDTestCase: tdLog.info("start consume processor") tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot']) - + tdLog.info("start to check consume result") expectRows = 1 resultList = tmqCom.selectConsumeResult(expectRows) @@ -237,13 +237,13 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, act insert rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsInserted, expectrowcnt)) - + if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + + # tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) @@ -256,14 +256,14 @@ class TDTestCase: tdLog.printNoPrefix("======== snapshot is 0: only consume from wal") self.tmqCase1() self.tmqCase2() - + self.prepareTestEnv() tdLog.printNoPrefix("====================================================================") 
tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.snapshot = 1 self.tmqCase1() self.tmqCase2() - + def stop(self): tdSql.close() diff --git a/tests/system-test/7-tmq/tmqUpdateWithConsume.py b/tests/system-test/7-tmq/tmqUpdateWithConsume.py index 2dd3a061c62bf5ac63a135e99424ac549ef83a54..be07ba13a978a8705f014b04ee7c09ba37e3a33b 100644 --- a/tests/system-test/7-tmq/tmqUpdateWithConsume.py +++ b/tests/system-test/7-tmq/tmqUpdateWithConsume.py @@ -20,7 +20,7 @@ class TDTestCase: self.vgroups = 4 self.ctbNum = 100 self.rowsPerTbl = 1000 - + def init(self, conn, logSql): tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor(), False) @@ -50,7 +50,7 @@ class TDTestCase: paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl - + tmqCom.initConsumerTable() tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1, wal_retention_size=-1, wal_retention_period=-1) tdLog.info("create stb") @@ -65,8 +65,8 @@ class TDTestCase: # tmqCom.insert_data_with_autoCreateTbl(tsql=tdSql,dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix="ctbx", # ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], # startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - - # tdLog.info("restart taosd to ensure that the data falls into the disk") + + # tdLog.info("restart taosd to ensure that the data falls into the disk") tdSql.query("flush database %s"%(paraDict['dbName'])) return @@ -94,30 +94,30 @@ class TDTestCase: paraDict['snapshot'] = self.snapshot paraDict['vgroups'] = self.vgroups paraDict['ctbNum'] = self.ctbNum - paraDict['rowsPerTbl'] = self.rowsPerTbl - + paraDict['rowsPerTbl'] = self.rowsPerTbl + # update to half tables paraDict['rowsPerTbl'] = int(self.rowsPerTbl / 2) tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"], ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"], - startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) - + startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx']) + tdLog.info("create topics from stb1") - topicFromStb1 = 'topic_stb1' + topicFromStb1 = 'topic_stb1' queryString = "select ts, c1, c2 from %s.%s"%(paraDict['dbName'], paraDict['stbName']) sqlString = "create topic %s as %s" %(topicFromStb1, queryString) tdLog.info("create topic sql: %s"%sqlString) - tdSql.execute(sqlString) - + tdSql.execute(sqlString) + # paraDict['ctbNum'] = self.ctbNum paraDict['rowsPerTbl'] = self.rowsPerTbl consumerId = 0 - + if self.snapshot == 0: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (1 + 1/2 + 1)) elif self.snapshot == 1: expectrowcnt = int(paraDict["rowsPerTbl"] * paraDict["ctbNum"] * (2)) - + topicList = topicFromStb1 ifcheckdata = 1 ifManualCommit = 1 @@ -143,16 +143,16 @@ class TDTestCase: tdSql.query(queryString) totalRowsInserted = tdSql.getRows() - + tdLog.info("act consume rows: %d, expect consume rows: %d, act insert rows: %d"%(totalConsumeRows, expectrowcnt, totalRowsInserted)) if self.snapshot == 0: if totalConsumeRows != expectrowcnt: tdLog.exit("tmq consume rows error!") elif self.snapshot == 1: if not (totalConsumeRows < expectrowcnt and totalConsumeRows >= totalRowsInserted): - tdLog.exit("tmq consume rows error!") - - # tmqCom.checkFileContent(consumerId, queryString) + tdLog.exit("tmq consume rows error!") + + # 
tmqCom.checkFileContent(consumerId, queryString) tdSql.query("drop topic %s"%topicFromStb1) tdLog.printNoPrefix("======== test case 1 end ...... ") @@ -162,7 +162,7 @@ class TDTestCase: self.ctbNum = 1 self.snapshot = 0 self.prepareTestEnv() - self.tmqCase1() + self.tmqCase1() tdLog.printNoPrefix("======== snapshot is 1: firstly consume from tsbs, and then from wal") self.prepareTestEnv() @@ -178,7 +178,7 @@ class TDTestCase: self.prepareTestEnv() self.snapshot = 1 self.tmqCase1() - + def stop(self): tdSql.close() diff --git a/tests/system-test/clusterCase.sh b/tests/system-test/clusterCase.sh index cfc44f7f957edee02fe090a2ef917ab7a36347ff..a4bec11d03732894a8418526f9e5acfe6967755f 100755 --- a/tests/system-test/clusterCase.sh +++ b/tests/system-test/clusterCase.sh @@ -19,4 +19,28 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 5 # python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5 - +# test case of vnode +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py  -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py  -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1 +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py   -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1  +python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 + diff --git a/tests/system-test/fulltest.sh 
b/tests/system-test/fulltest.sh index 7a3ad1070cfe1f41e27985a05f1489e8a4d870d9..3fc38ac898a6b38e3a6dc1cff70e915139274e93 100755 --- a/tests/system-test/fulltest.sh +++ b/tests/system-test/fulltest.sh @@ -31,6 +31,7 @@ python3 ./test.py -f 1-insert/time_range_wise.py python3 ./test.py -f 1-insert/block_wise.py python3 ./test.py -f 1-insert/create_retentions.py python3 ./test.py -f 1-insert/table_param_ttl.py +python3 ./test.py -f 1-insert/mutil_stage.py python3 ./test.py -f 1-insert/update_data_muti_rows.py @@ -146,7 +147,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py python3 ./test.py -f 2-query/elapsed.py python3 ./test.py -f 2-query/csum.py -python3 ./test.py -f 2-query/mavg.py +#python3 ./test.py -f 2-query/mavg.py python3 ./test.py -f 2-query/sample.py python3 ./test.py -f 2-query/function_diff.py python3 ./test.py -f 2-query/unique.py @@ -189,9 +190,33 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 6 -M 3 -C 5 # python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 # python3 test.py -f 6-cluster/5dnode3mnodeStopConnect.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 + +# vnode case +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1 +# python3 test.py -f 
6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1 +# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1 python3 ./test.py -f 7-tmq/dropDbR3ConflictTransaction.py -N 3 @@ -316,7 +341,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2 python3 ./test.py -f 2-query/avg.py -Q 2 # python3 ./test.py -f 2-query/elapsed.py -Q 2 python3 ./test.py -f 2-query/csum.py -Q 2 -python3 ./test.py -f 2-query/mavg.py -Q 2 +#python3 ./test.py -f 2-query/mavg.py -Q 2 python3 ./test.py -f 2-query/sample.py -Q 2 python3 ./test.py -f 2-query/function_diff.py -Q 2 python3 ./test.py -f 2-query/unique.py -Q 2 @@ -403,7 +428,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3 # python3 ./test.py -f 2-query/avg.py -Q 3 # python3 ./test.py -f 2-query/elapsed.py -Q 3 python3 ./test.py -f 2-query/csum.py -Q 3 -python3 ./test.py -f 2-query/mavg.py -Q 3 +#python3 ./test.py -f 2-query/mavg.py -Q 3 python3 ./test.py -f 2-query/sample.py -Q 3 python3 ./test.py -f 2-query/function_diff.py -Q 3 python3 ./test.py -f 2-query/unique.py -Q 3 @@ -427,4 +452,4 @@ python3 ./test.py -f 2-query/count_partition.py -Q 3 python3 ./test.py -f 2-query/max_partition.py -Q 3 python3 ./test.py -f 2-query/last_row.py -Q 3 python3 ./test.py -f 2-query/tsbsQuery.py -Q 3 -python3 ./test.py -f 2-query/sml.py -Q 3 \ No newline at end of file +python3 ./test.py -f 2-query/sml.py -Q 3 diff --git a/tests/test/c/sml_test.c b/tests/test/c/sml_test.c index b8a9b816cfe2bdfb46e636e63df6b56b4cdcb262..50249a5c5621aad4821fd7866950021f240c1c8a 100644 --- a/tests/test/c/sml_test.c +++ b/tests/test/c/sml_test.c @@ -348,16 +348,16 @@ int sml_16384_Test() { pRes = taos_query(taos, "use sml_db"); taos_free_result(pRes); - TAOS_RES *res = taos_schemaless_insert(taos, (char **)sql, 1, TSDB_SML_LINE_PROTOCOL, 0); + pRes = taos_schemaless_insert(taos, (char **)sql, 1, TSDB_SML_LINE_PROTOCOL, 0); printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); int code = taos_errno(pRes); - taos_free_result(res); + taos_free_result(pRes); if(code) return code; const char *sql1[] = { "qelhxo,id=pnnqhsa,t0=t,t1=127i8 c0=f,c1=127i8,c11=L\"ncharColValue\",c10=t 1626006833639000000", }; - TAOS_RES *res1 = taos_schemaless_insert(taos, (char **)sql1, 1, TSDB_SML_LINE_PROTOCOL, 0); + pRes = taos_schemaless_insert(taos, (char **)sql1, 1, TSDB_SML_LINE_PROTOCOL, 0); printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes)); code = taos_errno(pRes); taos_free_result(pRes); diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 58095940f3afd88687ecd3dc5f1a6c4a9d8ff7d9..1f066daabe7103da10b33cf878468b6ec229107b 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -112,7 +112,7 @@ ELSE () COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}" INSTALL_COMMAND - 
COMMAND wget -c https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -O ${CMAKE_CURRENT_SOURCE_DIR}/upx.tar.xz && tar -xvJf ${CMAKE_CURRENT_SOURCE_DIR}/upx.tar.xz -C ${CMAKE_CURRENT_SOURCE_DIR} --strip-components 1 > /dev/null && ${CMAKE_CURRENT_SOURCE_DIR}/upx taosadapter || : + COMMAND wget -c https://github.com/upx/upx/releases/download/v3.96/upx-3.96-${PLATFORM_ARCH_STR}_linux.tar.xz -O $ENV{HOME}/upx.tar.xz && tar -xvJf $ENV{HOME}/upx.tar.xz -C $ENV{HOME}/ --strip-components 1 > /dev/null && $ENV{HOME}/upx taosadapter || : COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ COMMAND cmake -E copy ./example/config/taosadapter.toml ${CMAKE_BINARY_DIR}/test/cfg/
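
Note on the tmq update tests patched above: every case validates consumption with the same arithmetic, `expectrowcnt = rowsPerTbl * ctbNum * factor`, where the factor depends on the snapshot mode (snapshot=0 consumes only from the WAL, so updated rows are delivered again on top of the original inserts; snapshot=1 first consumes deduplicated data from TSDB). A minimal sketch of that check for the single-child-table case in tmqUpdate-1ctb.py, using a hypothetical helper name (`expected_rows` is not part of tmqCommon), is:

```python
# Hypothetical illustration of the expectrowcnt arithmetic in tmqUpdate-1ctb.py,
# tmqCase1. Not part of the patch.
# snapshot == 0: the consumer replays the WAL, so the full insert (factor 1)
#                plus the update of half of each table (factor 1/2) are both seen.
# snapshot == 1: updates are merged in TSDB, so only the latest rows are seen.
def expected_rows(rows_per_tbl: int, ctb_num: int, snapshot: int) -> int:
    factor = (1 + 1 / 2) if snapshot == 0 else 1
    return int(rows_per_tbl * ctb_num * factor)

# With the test's defaults (rowsPerTbl=10000, ctbNum=1):
assert expected_rows(10000, 1, snapshot=0) == 15000
assert expected_rows(10000, 1, snapshot=1) == 10000
```

The multi-child-table variants extend the same idea: prepareTestEnv there inserts the base data twice (hence the leading factor of 2), and updates touching half the rows of half the tables contribute fractional terms such as `1/2*1/2*2` when snapshot=0 replays them from the WAL.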